From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>
Cc: <ndabilpuram@marvell.com>, <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 34/36] event/cnxk: add Rx adapter fastpath ops
Date: Sat, 6 Mar 2021 21:59:39 +0530
Message-ID: <20210306162942.6845-35-pbhagavatula@marvell.com>
In-Reply-To: <20210306162942.6845-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add support for event eth Rx adapter fastpath operations. The dequeue and
dequeue_burst handlers are now generated per combination of Rx offload
flags (RSS, packet type, checksum, mark update, multi-segment) through the
NIX_RX_FASTPATH_MODES R() macro, and cn9k/cn10k_sso_fp_fns_set() installs
the variant matching the offloads enabled on the device. Work of type
RTE_EVENT_TYPE_ETHDEV is converted from the NIX WQE/CQE to an rte_mbuf in
the get_work path.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
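Note (annotation, not part of the commit message): the heart of this patch is
a template pattern. NIX_RX_FASTPATH_MODES expands one
R(name, f3, f2, f1, f0, flags) entry per Rx offload combination; the worker
files use R() to stamp out one dequeue function per combination, and
cn9k/cn10k_sso_fp_fns_set() builds [2][2][2][2] tables of those functions and
indexes them with !!(rx_offloads & FLAG). The standalone sketch below
reproduces that pattern with made-up names (DEMO_*, demo_deq_*) and only four
of the sixteen combinations; it is illustrative only and uses none of the
driver's real symbols.

/* Minimal sketch of the X-macro + offload-indexed dispatch table pattern.
 * DEMO_MODES, DEMO_*_F and demo_deq_* are illustrative stand-ins for
 * NIX_RX_FASTPATH_MODES, NIX_RX_OFFLOAD_*_F and cn10k_sso_hws_deq_*.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RSS_F   (1u << 0)
#define DEMO_PTYPE_F (1u << 1)
#define DEMO_CSUM_F  (1u << 2)
#define DEMO_MARK_F  (1u << 3)

/* R(name, f3, f2, f1, f0, flags): one entry per flag combination
 * (the real list covers all sixteen; four are enough for the sketch). */
#define DEMO_MODES                                                        \
	R(no_offload, 0, 0, 0, 0, 0)                                      \
	R(rss,        0, 0, 0, 1, DEMO_RSS_F)                             \
	R(ptype_rss,  0, 0, 1, 1, DEMO_PTYPE_F | DEMO_RSS_F)              \
	R(mark_csum_ptype_rss, 1, 1, 1, 1,                                \
	  DEMO_MARK_F | DEMO_CSUM_F | DEMO_PTYPE_F | DEMO_RSS_F)

typedef uint16_t (*demo_deq_t)(void);

/* Generate one handler per mode; 'flags' is a compile-time constant in
 * each instantiation, so the dead offload paths can be optimized out. */
#define R(name, f3, f2, f1, f0, flags)                                    \
	static uint16_t demo_deq_##name(void)                             \
	{                                                                 \
		printf("dequeue variant: %s (flags 0x%x)\n", #name,       \
		       (unsigned)(flags));                                \
		return 0;                                                 \
	}
DEMO_MODES
#undef R

int main(void)
{
	/* Table indexed [mark][csum][ptype][rss], mirroring the
	 * sso_hws_deq[2][2][2][2] tables in cn10k_sso_fp_fns_set(). */
	static const demo_deq_t deq[2][2][2][2] = {
#define R(name, f3, f2, f1, f0, flags) [f3][f2][f1][f0] = demo_deq_##name,
		DEMO_MODES
#undef R
	};
	uint32_t rx_offloads = DEMO_PTYPE_F | DEMO_RSS_F;

	/* Runtime selection: each offload bit collapses to 0 or 1. */
	deq[!!(rx_offloads & DEMO_MARK_F)]
	   [!!(rx_offloads & DEMO_CSUM_F)]
	   [!!(rx_offloads & DEMO_PTYPE_F)]
	   [!!(rx_offloads & DEMO_RSS_F)]();
	return 0;
}

On the Rx event path itself, the WQE returned by GETWORK sits just past the
mbuf header, so the handlers recover the mbuf with a 0x80-byte subtraction in
the asm paths (sizeof(struct rte_mbuf) in the C fallback) before calling
cn9k/cn10k_wqe_to_mbuf().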
 drivers/event/cnxk/cn10k_eventdev.c | 115 ++++++++-
 drivers/event/cnxk/cn10k_worker.c   | 164 +++++++++----
 drivers/event/cnxk/cn10k_worker.h   |  91 +++++--
 drivers/event/cnxk/cn9k_eventdev.c  | 254 ++++++++++++++++++-
 drivers/event/cnxk/cn9k_worker.c    | 364 +++++++++++++++++++---------
 drivers/event/cnxk/cn9k_worker.h    | 158 +++++++++---
 drivers/event/cnxk/meson.build      |   8 +
 7 files changed, 932 insertions(+), 222 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 72175e16f..70c6fedae 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -247,17 +247,120 @@ static void
 cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	const event_dequeue_t sso_hws_deq[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_tmo_deq[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_sso_hws_tmo_deq_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_tmo_deq_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_sso_hws_tmo_deq_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_deq_seg[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_tmo_deq_seg[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_sso_hws_tmo_deq_seg_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_tmo_deq_seg_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_sso_hws_tmo_deq_seg_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
 
 	event_dev->enqueue = cn10k_sso_hws_enq;
 	event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
 	event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
 	event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
-
-	event_dev->dequeue = cn10k_sso_hws_deq;
-	event_dev->dequeue_burst = cn10k_sso_hws_deq_burst;
-	if (dev->is_timeout_deq) {
-		event_dev->dequeue = cn10k_sso_hws_tmo_deq;
-		event_dev->dequeue_burst = cn10k_sso_hws_tmo_deq_burst;
+	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+		event_dev->dequeue = sso_hws_deq_seg
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		event_dev->dequeue_burst = sso_hws_deq_seg_burst
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		if (dev->is_timeout_deq) {
+			event_dev->dequeue = sso_hws_tmo_deq_seg
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = sso_hws_tmo_deq_seg_burst
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		}
+	} else {
+		event_dev->dequeue = sso_hws_deq
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		event_dev->dequeue_burst = sso_hws_deq_burst
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		if (dev->is_timeout_deq) {
+			event_dev->dequeue = sso_hws_tmo_deq
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = sso_hws_tmo_deq_burst
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		}
 	}
 }
 
diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index 57b0714bb..46f72cf20 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -60,56 +60,118 @@ cn10k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
 	return 1;
 }
 
-uint16_t __rte_hot
-cn10k_sso_hws_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct cn10k_sso_hws *ws = port;
-
-	RTE_SET_USED(timeout_ticks);
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		cnxk_sso_hws_swtag_wait(ws->tag_wqe_op);
-		return 1;
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn10k_sso_hws *ws = port;                               \
+                                                                               \
+		RTE_SET_USED(timeout_ticks);                                   \
+                                                                               \
+		if (ws->swtag_req) {                                           \
+			ws->swtag_req = 0;                                     \
+			cnxk_sso_hws_swtag_wait(ws->tag_wqe_op);               \
+			return 1;                                              \
+		}                                                              \
+                                                                               \
+		return cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);  \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn10k_sso_hws_deq_##name(port, ev, timeout_ticks);      \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn10k_sso_hws_tmo_deq_##name(                       \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn10k_sso_hws *ws = port;                               \
+		uint16_t ret = 1;                                              \
+		uint64_t iter;                                                 \
+                                                                               \
+		if (ws->swtag_req) {                                           \
+			ws->swtag_req = 0;                                     \
+			cnxk_sso_hws_swtag_wait(ws->tag_wqe_op);               \
+			return ret;                                            \
+		}                                                              \
+                                                                               \
+		ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);   \
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
+			ret = cn10k_sso_hws_get_work(ws, ev, flags,            \
+						     ws->lookup_mem);          \
+                                                                               \
+		return ret;                                                    \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn10k_sso_hws_tmo_deq_burst_##name(                 \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn10k_sso_hws_tmo_deq_##name(port, ev, timeout_ticks);  \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                       \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn10k_sso_hws *ws = port;                               \
+                                                                               \
+		RTE_SET_USED(timeout_ticks);                                   \
+                                                                               \
+		if (ws->swtag_req) {                                           \
+			ws->swtag_req = 0;                                     \
+			cnxk_sso_hws_swtag_wait(ws->tag_wqe_op);               \
+			return 1;                                              \
+		}                                                              \
+                                                                               \
+		return cn10k_sso_hws_get_work(                                 \
+			ws, ev, flags | NIX_RX_MULTI_SEG_F, ws->lookup_mem);   \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                 \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn10k_sso_hws_deq_seg_##name(port, ev, timeout_ticks);  \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn10k_sso_hws_tmo_deq_seg_##name(                   \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn10k_sso_hws *ws = port;                               \
+		uint16_t ret = 1;                                              \
+		uint64_t iter;                                                 \
+                                                                               \
+		if (ws->swtag_req) {                                           \
+			ws->swtag_req = 0;                                     \
+			cnxk_sso_hws_swtag_wait(ws->tag_wqe_op);               \
+			return ret;                                            \
+		}                                                              \
+                                                                               \
+		ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);   \
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
+			ret = cn10k_sso_hws_get_work(ws, ev, flags,            \
+						     ws->lookup_mem);          \
+                                                                               \
+		return ret;                                                    \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn10k_sso_hws_tmo_deq_seg_burst_##name(             \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn10k_sso_hws_tmo_deq_seg_##name(port, ev,              \
+							timeout_ticks);        \
 	}
 
-	return cn10k_sso_hws_get_work(ws, ev);
-}
-
-uint16_t __rte_hot
-cn10k_sso_hws_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
-			uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return cn10k_sso_hws_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-cn10k_sso_hws_tmo_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct cn10k_sso_hws *ws = port;
-	uint16_t ret = 1;
-	uint64_t iter;
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		cnxk_sso_hws_swtag_wait(ws->tag_wqe_op);
-		return ret;
-	}
-
-	ret = cn10k_sso_hws_get_work(ws, ev);
-	for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
-		ret = cn10k_sso_hws_get_work(ws, ev);
-
-	return ret;
-}
-
-uint16_t __rte_hot
-cn10k_sso_hws_tmo_deq_burst(void *port, struct rte_event ev[],
-			    uint16_t nb_events, uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return cn10k_sso_hws_tmo_deq(port, ev, timeout_ticks);
-}
+NIX_RX_FASTPATH_MODES
+#undef R
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index d418e80aa..9521a5c94 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -83,20 +83,40 @@ cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
 		cn10k_sso_hws_fwd_group(ws, ev, grp);
 }
 
+static __rte_always_inline void
+cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
+		  const uint32_t tag, const uint32_t flags,
+		  const void *const lookup_mem)
+{
+	union mbuf_initializer mbuf_init = {
+		.fields = {.data_off = RTE_PKTMBUF_HEADROOM,
+			   .refcnt = 1,
+			   .nb_segs = 1,
+			   .port = port_id},
+	};
+
+	cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
+			      (struct rte_mbuf *)mbuf, lookup_mem,
+			      mbuf_init.value, flags);
+}
+
 static __rte_always_inline uint16_t
-cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev)
+cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
+		       const uint32_t flags, void *lookup_mem)
 {
 	union {
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
+	uint64_t mbuf;
 
 	gw.get_work = ws->gw_wdata;
 #if defined(RTE_ARCH_ARM64) && !defined(__clang__)
 	asm volatile(
 		PLT_CPU_FEATURE_PREAMBLE
 		"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
-		: [wdata] "+r"(gw.get_work)
+		"sub %[mbuf], %H[wdata], #0x80				\n"
+		: [wdata] "+r"(gw.get_work), [mbuf] "=&r"(mbuf)
 		: [gw_loc] "r"(ws->getwrk_op)
 		: "memory");
 #else
@@ -104,11 +124,25 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev)
 	do {
 		roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
 	} while (gw.u64[0] & BIT_ULL(63));
+	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
 		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
 		    (gw.u64[0] & 0xffffffff);
 
+	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+		    RTE_EVENT_TYPE_ETHDEV) {
+			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+					  gw.u64[0] & 0xFFFFF, flags,
+					  lookup_mem);
+			gw.u64[1] = mbuf;
+		}
+	}
+
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
 
@@ -123,6 +157,7 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
+	uint64_t mbuf;
 
 #ifdef RTE_ARCH_ARM64
 	asm volatile(PLT_CPU_FEATURE_PREAMBLE
@@ -133,19 +168,34 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
 		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
 		     "		tbnz %[tag], 63, rty%=			\n"
 		     "done%=:	dmb ld					\n"
-		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+		     "		sub %[mbuf], %[wqp], #0x80		\n"
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
+		       [mbuf] "=&r"(mbuf)
 		     : [tag_loc] "r"(ws->tag_wqe_op)
 		     : "memory");
 #else
 	do {
 		roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
 	} while (gw.u64[0] & BIT_ULL(63));
+	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
 	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
 		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
 		    (gw.u64[0] & 0xffffffff);
 
+	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+		    RTE_EVENT_TYPE_ETHDEV) {
+			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+					  gw.u64[0] & 0xFFFFF, 0, NULL);
+			gw.u64[1] = mbuf;
+		}
+	}
+
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
 
@@ -164,16 +214,29 @@ uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
 					       const struct rte_event ev[],
 					       uint16_t nb_events);
 
-uint16_t __rte_hot cn10k_sso_hws_deq(void *port, struct rte_event *ev,
-				     uint64_t timeout_ticks);
-uint16_t __rte_hot cn10k_sso_hws_deq_burst(void *port, struct rte_event ev[],
-					   uint16_t nb_events,
-					   uint64_t timeout_ticks);
-uint16_t __rte_hot cn10k_sso_hws_tmo_deq(void *port, struct rte_event *ev,
-					 uint64_t timeout_ticks);
-uint16_t __rte_hot cn10k_sso_hws_tmo_deq_burst(void *port,
-					       struct rte_event ev[],
-					       uint16_t nb_events,
-					       uint64_t timeout_ticks);
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn10k_sso_hws_tmo_deq_##name(                       \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn10k_sso_hws_tmo_deq_burst_##name(                 \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                       \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                 \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn10k_sso_hws_tmo_deq_seg_##name(                   \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn10k_sso_hws_tmo_deq_seg_burst_##name(             \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);
+
+NIX_RX_FASTPATH_MODES
+#undef R
 
 #endif
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 4aa577bd5..e4383dca1 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -252,17 +252,179 @@ static void
 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	/* Single WS modes */
+	const event_dequeue_t sso_hws_deq[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_tmo_deq[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_tmo_deq_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_tmo_deq_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_tmo_deq_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_deq_seg[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_tmo_deq_seg[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_tmo_deq_seg_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_tmo_deq_seg_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_tmo_deq_seg_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	/* Dual WS modes */
+	const event_dequeue_t sso_hws_dual_deq[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_dual_deq_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_dual_tmo_deq[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_dual_tmo_deq_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_dual_tmo_deq_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_dual_tmo_deq_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_dual_deq_seg_burst[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_t sso_hws_dual_tmo_deq_seg[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_dual_tmo_deq_seg_##name,
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
+	const event_dequeue_burst_t sso_hws_dual_tmo_deq_seg_burst[2][2][2][2] =
+		{
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn9k_sso_hws_dual_tmo_deq_seg_burst_##name,
+			NIX_RX_FASTPATH_MODES
+#undef R
+		};
 
 	event_dev->enqueue = cn9k_sso_hws_enq;
 	event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
 	event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
 	event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
-
-	event_dev->dequeue = cn9k_sso_hws_deq;
-	event_dev->dequeue_burst = cn9k_sso_hws_deq_burst;
-	if (dev->deq_tmo_ns) {
-		event_dev->dequeue = cn9k_sso_hws_tmo_deq;
-		event_dev->dequeue_burst = cn9k_sso_hws_tmo_deq_burst;
+	if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+		event_dev->dequeue = sso_hws_deq_seg
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		event_dev->dequeue_burst = sso_hws_deq_seg_burst
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		if (dev->is_timeout_deq) {
+			event_dev->dequeue = sso_hws_tmo_deq_seg
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = sso_hws_tmo_deq_seg_burst
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		}
+	} else {
+		event_dev->dequeue = sso_hws_deq
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		event_dev->dequeue_burst = sso_hws_deq_burst
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+			[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		if (dev->is_timeout_deq) {
+			event_dev->dequeue = sso_hws_tmo_deq
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = sso_hws_tmo_deq_burst
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+		}
 	}
 
 	if (dev->dual_ws) {
@@ -272,14 +434,82 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		event_dev->enqueue_forward_burst =
 			cn9k_sso_hws_dual_enq_fwd_burst;
 
-		event_dev->dequeue = cn9k_sso_hws_dual_deq;
-		event_dev->dequeue_burst = cn9k_sso_hws_dual_deq_burst;
-		if (dev->deq_tmo_ns) {
-			event_dev->dequeue = cn9k_sso_hws_dual_tmo_deq;
-			event_dev->dequeue_burst =
-				cn9k_sso_hws_dual_tmo_deq_burst;
+		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+			event_dev->dequeue = sso_hws_dual_deq_seg
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = sso_hws_dual_deq_seg_burst
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			if (dev->is_timeout_deq) {
+				event_dev->dequeue = sso_hws_dual_tmo_deq_seg
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_RSS_F)];
+				event_dev->dequeue_burst =
+					sso_hws_dual_tmo_deq_seg_burst
+						[!!(dev->rx_offloads &
+						    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+						[!!(dev->rx_offloads &
+						    NIX_RX_OFFLOAD_CHECKSUM_F)]
+						[!!(dev->rx_offloads &
+						    NIX_RX_OFFLOAD_PTYPE_F)]
+						[!!(dev->rx_offloads &
+						    NIX_RX_OFFLOAD_RSS_F)];
+			}
+		} else {
+			event_dev->dequeue = sso_hws_dual_deq
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			event_dev->dequeue_burst = sso_hws_dual_deq_burst
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+				[!!(dev->rx_offloads &
+				    NIX_RX_OFFLOAD_CHECKSUM_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+				[!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+			if (dev->is_timeout_deq) {
+				event_dev->dequeue = sso_hws_dual_tmo_deq
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_CHECKSUM_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_PTYPE_F)]
+					[!!(dev->rx_offloads &
+					    NIX_RX_OFFLOAD_RSS_F)];
+				event_dev->dequeue_burst =
+					sso_hws_dual_tmo_deq_burst
+						[!!(dev->rx_offloads &
+						    NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+						[!!(dev->rx_offloads &
+						    NIX_RX_OFFLOAD_CHECKSUM_F)]
+						[!!(dev->rx_offloads &
+						    NIX_RX_OFFLOAD_PTYPE_F)]
+						[!!(dev->rx_offloads &
+						    NIX_RX_OFFLOAD_RSS_F)];
+			}
 		}
 	}
+
+	rte_mb();
 }
 
 static void *
diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c
index 41ffd88a0..fb572c7c9 100644
--- a/drivers/event/cnxk/cn9k_worker.c
+++ b/drivers/event/cnxk/cn9k_worker.c
@@ -60,59 +60,121 @@ cn9k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
 	return 1;
 }
 
-uint16_t __rte_hot
-cn9k_sso_hws_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct cn9k_sso_hws *ws = port;
-
-	RTE_SET_USED(timeout_ticks);
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		cnxk_sso_hws_swtag_wait(ws->tag_op);
-		return 1;
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn9k_sso_hws *ws = port;                                \
+                                                                               \
+		RTE_SET_USED(timeout_ticks);                                   \
+                                                                               \
+		if (ws->swtag_req) {                                           \
+			ws->swtag_req = 0;                                     \
+			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			return 1;                                              \
+		}                                                              \
+                                                                               \
+		return cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);   \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn9k_sso_hws_deq_##name(port, ev, timeout_ticks);       \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_tmo_deq_##name(                        \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn9k_sso_hws *ws = port;                                \
+		uint16_t ret = 1;                                              \
+		uint64_t iter;                                                 \
+                                                                               \
+		if (ws->swtag_req) {                                           \
+			ws->swtag_req = 0;                                     \
+			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			return ret;                                            \
+		}                                                              \
+                                                                               \
+		ret = cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);    \
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
+			ret = cn9k_sso_hws_get_work(ws, ev, flags,             \
+						    ws->lookup_mem);           \
+                                                                               \
+		return ret;                                                    \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_tmo_deq_burst_##name(                  \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn9k_sso_hws_tmo_deq_##name(port, ev, timeout_ticks);   \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn9k_sso_hws *ws = port;                                \
+                                                                               \
+		RTE_SET_USED(timeout_ticks);                                   \
+                                                                               \
+		if (ws->swtag_req) {                                           \
+			ws->swtag_req = 0;                                     \
+			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			return 1;                                              \
+		}                                                              \
+                                                                               \
+		return cn9k_sso_hws_get_work(                                  \
+			ws, ev, flags | NIX_RX_MULTI_SEG_F, ws->lookup_mem);   \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn9k_sso_hws_deq_seg_##name(port, ev, timeout_ticks);   \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_tmo_deq_seg_##name(                    \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn9k_sso_hws *ws = port;                                \
+		uint16_t ret = 1;                                              \
+		uint64_t iter;                                                 \
+                                                                               \
+		if (ws->swtag_req) {                                           \
+			ws->swtag_req = 0;                                     \
+			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			return ret;                                            \
+		}                                                              \
+                                                                               \
+		ret = cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);    \
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
+			ret = cn9k_sso_hws_get_work(ws, ev, flags,             \
+						    ws->lookup_mem);           \
+                                                                               \
+		return ret;                                                    \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_tmo_deq_seg_burst_##name(              \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn9k_sso_hws_tmo_deq_seg_##name(port, ev,               \
+						       timeout_ticks);         \
 	}
 
-	return cn9k_sso_hws_get_work(ws, ev);
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
-		       uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return cn9k_sso_hws_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_tmo_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct cn9k_sso_hws *ws = port;
-	uint16_t ret = 1;
-	uint64_t iter;
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		cnxk_sso_hws_swtag_wait(ws->tag_op);
-		return ret;
-	}
-
-	ret = cn9k_sso_hws_get_work(ws, ev);
-	for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
-		ret = cn9k_sso_hws_get_work(ws, ev);
-
-	return ret;
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_tmo_deq_burst(void *port, struct rte_event ev[],
-			   uint16_t nb_events, uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return cn9k_sso_hws_tmo_deq(port, ev, timeout_ticks);
-}
+NIX_RX_FASTPATH_MODES
+#undef R
 
 /* Dual ws ops. */
 
@@ -172,65 +234,145 @@ cn9k_sso_hws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
 	return 1;
 }
 
-uint16_t __rte_hot
-cn9k_sso_hws_dual_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct cn9k_sso_hws_dual *dws = port;
-	uint16_t gw;
-
-	RTE_SET_USED(timeout_ticks);
-	if (dws->swtag_req) {
-		dws->swtag_req = 0;
-		cnxk_sso_hws_swtag_wait(dws->ws_state[!dws->vws].tag_op);
-		return 1;
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn9k_sso_hws_dual *dws = port;                          \
+		uint16_t gw;                                                   \
+                                                                               \
+		RTE_SET_USED(timeout_ticks);                                   \
+		if (dws->swtag_req) {                                          \
+			dws->swtag_req = 0;                                    \
+			cnxk_sso_hws_swtag_wait(                               \
+				dws->ws_state[!dws->vws].tag_op);              \
+			return 1;                                              \
+		}                                                              \
+                                                                               \
+		gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],      \
+						&dws->ws_state[!dws->vws], ev, \
+						flags, dws->lookup_mem);       \
+		dws->vws = !dws->vws;                                          \
+		return gw;                                                     \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn9k_sso_hws_dual_deq_##name(port, ev, timeout_ticks);  \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_##name(                   \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn9k_sso_hws_dual *dws = port;                          \
+		uint16_t ret = 1;                                              \
+		uint64_t iter;                                                 \
+                                                                               \
+		if (dws->swtag_req) {                                          \
+			dws->swtag_req = 0;                                    \
+			cnxk_sso_hws_swtag_wait(                               \
+				dws->ws_state[!dws->vws].tag_op);              \
+			return ret;                                            \
+		}                                                              \
+                                                                               \
+		ret = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],     \
+						 &dws->ws_state[!dws->vws],    \
+						 ev, flags, dws->lookup_mem);  \
+		dws->vws = !dws->vws;                                          \
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) {   \
+			ret = cn9k_sso_hws_dual_get_work(                      \
+				&dws->ws_state[dws->vws],                      \
+				&dws->ws_state[!dws->vws], ev, flags,          \
+				dws->lookup_mem);                              \
+			dws->vws = !dws->vws;                                  \
+		}                                                              \
+                                                                               \
+		return ret;                                                    \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_burst_##name(             \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn9k_sso_hws_dual_tmo_deq_##name(port, ev,              \
+							timeout_ticks);        \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn9k_sso_hws_dual *dws = port;                          \
+		uint16_t gw;                                                   \
+                                                                               \
+		RTE_SET_USED(timeout_ticks);                                   \
+		if (dws->swtag_req) {                                          \
+			dws->swtag_req = 0;                                    \
+			cnxk_sso_hws_swtag_wait(                               \
+				dws->ws_state[!dws->vws].tag_op);              \
+			return 1;                                              \
+		}                                                              \
+                                                                               \
+		gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],      \
+						&dws->ws_state[!dws->vws], ev, \
+						flags, dws->lookup_mem);       \
+		dws->vws = !dws->vws;                                          \
+		return gw;                                                     \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn9k_sso_hws_dual_deq_seg_##name(port, ev,              \
+							timeout_ticks);        \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_seg_##name(               \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
+	{                                                                      \
+		struct cn9k_sso_hws_dual *dws = port;                          \
+		uint16_t ret = 1;                                              \
+		uint64_t iter;                                                 \
+                                                                               \
+		if (dws->swtag_req) {                                          \
+			dws->swtag_req = 0;                                    \
+			cnxk_sso_hws_swtag_wait(                               \
+				dws->ws_state[!dws->vws].tag_op);              \
+			return ret;                                            \
+		}                                                              \
+                                                                               \
+		ret = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],     \
+						 &dws->ws_state[!dws->vws],    \
+						 ev, flags, dws->lookup_mem);  \
+		dws->vws = !dws->vws;                                          \
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) {   \
+			ret = cn9k_sso_hws_dual_get_work(                      \
+				&dws->ws_state[dws->vws],                      \
+				&dws->ws_state[!dws->vws], ev, flags,          \
+				dws->lookup_mem);                              \
+			dws->vws = !dws->vws;                                  \
+		}                                                              \
+                                                                               \
+		return ret;                                                    \
+	}                                                                      \
+                                                                               \
+	uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_seg_burst_##name(         \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks)                                        \
+	{                                                                      \
+		RTE_SET_USED(nb_events);                                       \
+                                                                               \
+		return cn9k_sso_hws_dual_tmo_deq_seg_##name(port, ev,          \
+							    timeout_ticks);    \
 	}
 
-	gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],
-					&dws->ws_state[!dws->vws], ev);
-	dws->vws = !dws->vws;
-	return gw;
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_dual_deq_burst(void *port, struct rte_event ev[],
-			    uint16_t nb_events, uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return cn9k_sso_hws_dual_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_dual_tmo_deq(void *port, struct rte_event *ev,
-			  uint64_t timeout_ticks)
-{
-	struct cn9k_sso_hws_dual *dws = port;
-	uint16_t ret = 1;
-	uint64_t iter;
-
-	if (dws->swtag_req) {
-		dws->swtag_req = 0;
-		cnxk_sso_hws_swtag_wait(dws->ws_state[!dws->vws].tag_op);
-		return ret;
-	}
-
-	ret = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],
-					 &dws->ws_state[!dws->vws], ev);
-	dws->vws = !dws->vws;
-	for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) {
-		ret = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],
-						 &dws->ws_state[!dws->vws], ev);
-		dws->vws = !dws->vws;
-	}
-
-	return ret;
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_dual_tmo_deq_burst(void *port, struct rte_event ev[],
-				uint16_t nb_events, uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return cn9k_sso_hws_dual_tmo_deq(port, ev, timeout_ticks);
-}
+NIX_RX_FASTPATH_MODES
+#undef R
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index b5af5ecf4..bbdca3c95 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -128,17 +128,38 @@ cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
 	}
 }
 
+static __rte_always_inline void
+cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
+		 const uint32_t tag, const uint32_t flags,
+		 const void *const lookup_mem)
+{
+	union mbuf_initializer mbuf_init = {
+		.fields = {.data_off = RTE_PKTMBUF_HEADROOM,
+			   .refcnt = 1,
+			   .nb_segs = 1,
+			   .port = port_id},
+	};
+
+	cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
+			     (struct rte_mbuf *)mbuf, lookup_mem,
+			     mbuf_init.value, flags);
+}
+
 static __rte_always_inline uint16_t
 cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
 			   struct cn9k_sso_hws_state *ws_pair,
-			   struct rte_event *ev)
+			   struct rte_event *ev, const uint32_t flags,
+			   const void *const lookup_mem)
 {
 	const uint64_t set_gw = BIT_ULL(16) | 1;
 	union {
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
+	uint64_t mbuf;
 
+	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
+		rte_prefetch_non_temporal(lookup_mem);
 #ifdef RTE_ARCH_ARM64
 	asm volatile(PLT_CPU_FEATURE_PREAMBLE
 		     "rty%=:					\n"
@@ -147,7 +168,10 @@ cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
 		     "		tbnz %[tag], 63, rty%=		\n"
 		     "done%=:	str %[gw], [%[pong]]		\n"
 		     "		dmb ld				\n"
-		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+		     "		sub %[mbuf], %[wqp], #0x80	\n"
+		     "		prfm pldl1keep, [%[mbuf]]	\n"
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
+		       [mbuf] "=&r"(mbuf)
 		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
 		       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
 #else
@@ -156,12 +180,26 @@ cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
 		gw.u64[0] = plt_read64(ws->tag_op);
 	gw.u64[1] = plt_read64(ws->wqp_op);
 	plt_write64(set_gw, ws_pair->getwrk_op);
+	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
 	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
 		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
 		    (gw.u64[0] & 0xffffffff);
 
+	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+		    RTE_EVENT_TYPE_ETHDEV) {
+			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+					 gw.u64[0] & 0xFFFFF, flags,
+					 lookup_mem);
+			gw.u64[1] = mbuf;
+		}
+	}
+
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
 
@@ -169,16 +207,21 @@ cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
 }
 
 static __rte_always_inline uint16_t
-cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev)
+cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
+		      const uint32_t flags, const void *const lookup_mem)
 {
 	union {
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
+	uint64_t mbuf;
 
 	plt_write64(BIT_ULL(16) | /* wait for work. */
 			    1,	  /* Use Mask set 0. */
 		    ws->getwrk_op);
+
+	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
+		rte_prefetch_non_temporal(lookup_mem);
 #ifdef RTE_ARCH_ARM64
 	asm volatile(PLT_CPU_FEATURE_PREAMBLE
 		     "		ldr %[tag], [%[tag_loc]]	\n"
@@ -190,7 +233,10 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev)
 		     "		ldr %[wqp], [%[wqp_loc]]	\n"
 		     "		tbnz %[tag], 63, rty%=		\n"
 		     "done%=:	dmb ld				\n"
-		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+		     "		sub %[mbuf], %[wqp], #0x80	\n"
+		     "		prfm pldl1keep, [%[mbuf]]	\n"
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
+		       [mbuf] "=&r"(mbuf)
 		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
 #else
 	gw.u64[0] = plt_read64(ws->tag_op);
@@ -198,12 +244,26 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev)
 		gw.u64[0] = plt_read64(ws->tag_op);
 
 	gw.u64[1] = plt_read64(ws->wqp_op);
+	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
 	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
 		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
 		    (gw.u64[0] & 0xffffffff);
 
+	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+		    RTE_EVENT_TYPE_ETHDEV) {
+			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+					 gw.u64[0] & 0xFFFFF, flags,
+					 lookup_mem);
+			gw.u64[1] = mbuf;
+		}
+	}
+
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
 
@@ -218,6 +278,7 @@ cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
+	uint64_t mbuf;
 
 #ifdef RTE_ARCH_ARM64
 	asm volatile(PLT_CPU_FEATURE_PREAMBLE
@@ -230,7 +291,9 @@ cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
 		     "		ldr %[wqp], [%[wqp_loc]]	\n"
 		     "		tbnz %[tag], 63, rty%=		\n"
 		     "done%=:	dmb ld				\n"
-		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+		     "		sub %[mbuf], %[wqp], #0x80	\n"
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
+		       [mbuf] "=&r"(mbuf)
 		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
 #else
 	gw.u64[0] = plt_read64(ws->tag_op);
@@ -238,12 +301,25 @@ cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
 		gw.u64[0] = plt_read64(ws->tag_op);
 
 	gw.u64[1] = plt_read64(ws->wqp_op);
+	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
 	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
 		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
 		    (gw.u64[0] & 0xffffffff);
 
+	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+		    RTE_EVENT_TYPE_ETHDEV) {
+			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+					 gw.u64[0] & 0xFFFFF, 0, NULL);
+			gw.u64[1] = mbuf;
+		}
+	}
+
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
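
Note on the mbuf recovery above: both the inline asm ("sub %[mbuf], %[wqp], #0x80") and the C fallback rely on the same fixed layout, i.e. the NIX WQE handed back by GWS is expected to start sizeof(struct rte_mbuf) bytes after the start of its rte_mbuf, so the mbuf address can be recovered with a single subtraction and prefetched early. A minimal standalone sketch of that arithmetic (the HYP_* names and the hard-coded 0x80 are illustrative assumptions, not driver code):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed to equal sizeof(struct rte_mbuf), matching the 0x80 in the asm. */
	#define HYP_MBUF_HDR_SIZE 0x80

	/* Recover the mbuf address from a WQE pointer under the fixed-offset
	 * layout assumption used above: the WQE directly follows the mbuf header.
	 */
	static inline uint64_t
	hyp_wqe_to_mbuf_addr(uint64_t wqe)
	{
		return wqe - HYP_MBUF_HDR_SIZE;
	}

	int
	main(void)
	{
		uint64_t wqe = 0x140000080ULL; /* example WQE address */

		printf("mbuf @ 0x%" PRIx64 "\n", hyp_wqe_to_mbuf_addr(wqe));
		return 0;
	}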
 
@@ -274,28 +350,54 @@ uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
 						   const struct rte_event ev[],
 						   uint16_t nb_events);
 
-uint16_t __rte_hot cn9k_sso_hws_deq(void *port, struct rte_event *ev,
-				    uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_deq_burst(void *port, struct rte_event ev[],
-					  uint16_t nb_events,
-					  uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_tmo_deq(void *port, struct rte_event *ev,
-					uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_tmo_deq_burst(void *port, struct rte_event ev[],
-					      uint16_t nb_events,
-					      uint64_t timeout_ticks);
-
-uint16_t __rte_hot cn9k_sso_hws_dual_deq(void *port, struct rte_event *ev,
-					 uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst(void *port,
-					       struct rte_event ev[],
-					       uint16_t nb_events,
-					       uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq(void *port, struct rte_event *ev,
-					     uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_burst(void *port,
-						   struct rte_event ev[],
-						   uint16_t nb_events,
-						   uint64_t timeout_ticks);
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_tmo_deq_##name(                        \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn9k_sso_hws_tmo_deq_burst_##name(                  \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_tmo_deq_seg_##name(                    \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn9k_sso_hws_tmo_deq_seg_burst_##name(              \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);
+
+NIX_RX_FASTPATH_MODES
+#undef R
+
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_##name(                   \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_burst_##name(             \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);                                       \
+	uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_seg_##name(               \
+		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
+	uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_seg_burst_##name(         \
+		void *port, struct rte_event ev[], uint16_t nb_events,         \
+		uint64_t timeout_ticks);
+
+NIX_RX_FASTPATH_MODES
+#undef R
 
 #endif
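
The R() blocks above follow the usual X-macro pattern: NIX_RX_FASTPATH_MODES is a list of R(name, f3, f2, f1, f0, flags) entries, one per combination of Rx offload features, and each "#define R ... / #undef R" pass stamps out one prototype set per entry. Each generated variant can then invoke the flags-templated helpers such as cn9k_sso_hws_get_work() with a compile-time-constant flags value, so the compiler drops the offload branches a given mode does not need. A reduced, hypothetical expansion example (the HYP_* names and the two mode entries are made up for illustration; the real list is defined by the cnxk Rx headers):

	#include <stdint.h>

	/* Hypothetical, reduced mode list: one R() entry per Rx offload combination. */
	#define HYP_RX_FASTPATH_MODES                                          \
		R(no_offload, 0, 0, 0, 0, 0x0)                                 \
		R(mark_cksum, 0, 0, 1, 1, 0x3)

	/* Stamp out one prototype per mode, exactly like the blocks above. */
	#define R(name, f3, f2, f1, f0, flags)                                 \
		uint16_t hyp_deq_##name(void *port, void *ev,                  \
					uint64_t timeout_ticks);
	HYP_RX_FASTPATH_MODES
	#undef R
	/*
	 * Expands to:
	 *   uint16_t hyp_deq_no_offload(void *port, void *ev, uint64_t timeout_ticks);
	 *   uint16_t hyp_deq_mark_cksum(void *port, void *ev, uint64_t timeout_ticks);
	 */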
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index b40e39397..533ad853a 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -8,6 +8,14 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
 	subdir_done()
 endif
 
+extra_flags = ['-Wno-strict-aliasing']
+foreach flag: extra_flags
+	if cc.has_argument(flag)
+		cflags += flag
+	endif
+endforeach
+
+
 sources = files('cn10k_worker.c',
 		'cn10k_eventdev.c',
 		'cn9k_worker.c',
-- 
2.17.1



Thread overview: 185+ messages
2021-03-06 16:29 [dpdk-dev] [PATCH 00/36] Marvell CNXK Event device Driver pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 01/36] event/cnxk: add build infra and device setup pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 02/36] event/cnxk: add device capabilities function pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 03/36] event/cnxk: add platform specific device probe pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 04/36] event/cnxk: add common configuration validation pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 05/36] event/cnxk: add platform specific device config pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 06/36] event/cnxk: add event queue config functions pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 07/36] event/cnxk: allocate event inflight buffers pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 08/36] event/cnxk: add devargs for inflight buffer count pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 09/36] event/cnxk: add devargs to control SSO HWGRP QoS pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 10/36] event/cnxk: add port config functions pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 11/36] event/cnxk: add event port link and unlink pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 12/36] event/cnxk: add devargs to configure getwork mode pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 13/36] event/cnxk: add SSO HW device operations pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 14/36] event/cnxk: add SSO GWS fastpath enqueue functions pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 15/36] event/cnxk: add SSO GWS dequeue fastpath functions pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 16/36] event/cnxk: add device start function pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 17/36] event/cnxk: add device stop and close functions pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 18/36] event/cnxk: add SSO selftest and dump pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 19/36] event/cnxk: support event timer pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 20/36] event/cnxk: add timer adapter capabilities pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 21/36] event/cnxk: create and free timer adapter pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 22/36] event/cnxk: add devargs to disable NPA pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 23/36] event/cnxk: allow adapters to resize inflights pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 24/36] event/cnxk: add timer adapter info function pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 25/36] event/cnxk: add devargs for chunk size and rings pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 26/36] event/cnxk: add TIM bucket operations pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 27/36] event/cnxk: add timer arm routine pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 28/36] event/cnxk: add timer arm timeout burst pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 29/36] event/cnxk: add timer cancel function pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 30/36] event/cnxk: add timer stats get and reset pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 31/36] event/cnxk: add timer adapter start and stop pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 32/36] event/cnxk: add devargs to control timer adapters pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 33/36] event/cnxk: add Rx adapter support pbhagavatula
2021-03-06 16:29 ` pbhagavatula [this message]
2021-03-06 16:29 ` [dpdk-dev] [PATCH 35/36] event/cnxk: add Tx " pbhagavatula
2021-03-06 16:29 ` [dpdk-dev] [PATCH 36/36] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-04-26 17:44 ` [dpdk-dev] [PATCH v2 00/33] Marvell CNXK Event device Driver pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 01/33] event/cnxk: add build infra and device setup pbhagavatula
2021-04-27  9:08     ` Kinsella, Ray
2021-04-28  8:01     ` David Marchand
2021-04-29  9:05     ` Jerin Jacob
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 02/33] event/cnxk: add device capabilities function pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 03/33] event/cnxk: add platform specific device probe pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 04/33] event/cnxk: add common configuration validation pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 05/33] event/cnxk: add platform specific device config pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 06/33] event/cnxk: add event queue config functions pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 07/33] event/cnxk: allocate event inflight buffers pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 08/33] event/cnxk: add devargs for inflight buffer count pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 09/33] event/cnxk: add devargs to control SSO HWGRP QoS pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 10/33] event/cnxk: add port config functions pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 11/33] event/cnxk: add event port link and unlink pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 12/33] event/cnxk: add devargs to configure getwork mode pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 13/33] event/cnxk: add SSO HW device operations pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 14/33] event/cnxk: add SSO GWS fastpath enqueue functions pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 15/33] event/cnxk: add SSO GWS dequeue fastpath functions pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 16/33] event/cnxk: add device start function pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 17/33] event/cnxk: add device stop and close functions pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 18/33] event/cnxk: add SSO selftest and dump pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 19/33] event/cnxk: add event port and queue xstats pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 20/33] event/cnxk: support event timer pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 21/33] event/cnxk: add timer adapter capabilities pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 22/33] event/cnxk: create and free timer adapter pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 23/33] event/cnxk: add devargs to disable NPA pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 24/33] event/cnxk: allow adapters to resize inflights pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 25/33] event/cnxk: add timer adapter info function pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 26/33] event/cnxk: add devargs for chunk size and rings pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 27/33] event/cnxk: add TIM bucket operations pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 28/33] event/cnxk: add timer arm routine pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 29/33] event/cnxk: add timer arm timeout burst pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 30/33] event/cnxk: add timer cancel function pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 31/33] event/cnxk: add timer stats get and reset pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 32/33] event/cnxk: add timer adapter start and stop pbhagavatula
2021-04-26 17:44   ` [dpdk-dev] [PATCH v2 33/33] event/cnxk: add devargs to control timer adapters pbhagavatula
2021-04-30  5:11   ` [dpdk-dev] [PATCH v2 00/33] Marvell CNXK Event device Driver Jerin Jacob
2021-04-30 13:53   ` [dpdk-dev] [PATCH v3 " pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 01/33] event/cnxk: add build infra and device setup pbhagavatula
2021-05-03  9:41       ` Jerin Jacob
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 02/33] event/cnxk: add device capabilities function pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 03/33] event/cnxk: add platform specific device probe pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 04/33] event/cnxk: add common configuration validation pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 05/33] event/cnxk: add platform specific device config pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 06/33] event/cnxk: add event queue config functions pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 07/33] event/cnxk: allocate event inflight buffers pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 08/33] event/cnxk: add devargs for inflight buffer count pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 09/33] event/cnxk: add devargs to control SSO HWGRP QoS pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 10/33] event/cnxk: add port config functions pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 11/33] event/cnxk: add event port link and unlink pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 12/33] event/cnxk: add devargs to configure getwork mode pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 13/33] event/cnxk: add SSO HW device operations pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 14/33] event/cnxk: add SSO GWS fastpath enqueue functions pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 15/33] event/cnxk: add SSO GWS dequeue fastpath functions pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 16/33] event/cnxk: add device start function pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 17/33] event/cnxk: add device stop and close functions pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 18/33] event/cnxk: add SSO selftest and dump pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 19/33] event/cnxk: add event port and queue xstats pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 20/33] event/cnxk: support event timer pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 21/33] event/cnxk: add timer adapter capabilities pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 22/33] event/cnxk: create and free timer adapter pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 23/33] event/cnxk: add devargs to disable NPA pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 24/33] event/cnxk: allow adapters to resize inflights pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 25/33] event/cnxk: add timer adapter info function pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 26/33] event/cnxk: add devargs for chunk size and rings pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 27/33] event/cnxk: add TIM bucket operations pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 28/33] event/cnxk: add timer arm routine pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 29/33] event/cnxk: add timer arm timeout burst pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 30/33] event/cnxk: add timer cancel function pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 31/33] event/cnxk: add timer stats get and reset pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 32/33] event/cnxk: add timer adapter start and stop pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 33/33] event/cnxk: add devargs to control timer adapters pbhagavatula
2021-05-01 12:03     ` [dpdk-dev] [PATCH v3 00/33] Marvell CNXK Event device Driver Jerin Jacob
2021-05-03 15:22     ` [dpdk-dev] [PATCH v4 00/34] " pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 01/34] common/cnxk: rename deprecated constant pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 02/34] event/cnxk: add build infra and device setup pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 03/34] event/cnxk: add device capabilities function pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 04/34] event/cnxk: add platform specific device probe pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 05/34] event/cnxk: add common configuration validation pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 06/34] event/cnxk: add platform specific device config pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 07/34] event/cnxk: add event queue config functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 08/34] event/cnxk: allocate event inflight buffers pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 09/34] event/cnxk: add devargs for inflight buffer count pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 10/34] event/cnxk: add devargs to control SSO HWGRP QoS pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 11/34] event/cnxk: add port config functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 12/34] event/cnxk: add event port link and unlink pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 13/34] event/cnxk: add devargs to configure getwork mode pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 14/34] event/cnxk: add SSO HW device operations pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 15/34] event/cnxk: add SSO GWS fastpath enqueue functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 16/34] event/cnxk: add SSO GWS dequeue fastpath functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 17/34] event/cnxk: add device start function pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 18/34] event/cnxk: add device stop and close functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 19/34] event/cnxk: add SSO selftest and dump pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 20/34] event/cnxk: add event port and queue xstats pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 21/34] event/cnxk: support event timer pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 22/34] event/cnxk: add timer adapter capabilities pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 23/34] event/cnxk: create and free timer adapter pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 24/34] event/cnxk: add devargs to disable NPA pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 25/34] event/cnxk: allow adapters to resize inflights pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 26/34] event/cnxk: add timer adapter info function pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 27/34] event/cnxk: add devargs for chunk size and rings pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 28/34] event/cnxk: add TIM bucket operations pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 29/34] event/cnxk: add timer arm routine pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 30/34] event/cnxk: add timer arm timeout burst pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 31/34] event/cnxk: add timer cancel function pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 32/34] event/cnxk: add timer stats get and reset pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 33/34] event/cnxk: add timer adapter start and stop pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 34/34] event/cnxk: add devargs to control timer adapters pbhagavatula
2021-05-04  0:26       ` [dpdk-dev] [PATCH v5 00/35] Marvell CNXK Event device Driver pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 01/35] common/cnxk: rename deprecated constant pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 02/35] common/cnxk: update inline asm prefix pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 03/35] event/cnxk: add build infra and device setup pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 04/35] event/cnxk: add device capabilities function pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 05/35] event/cnxk: add platform specific device probe pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 06/35] event/cnxk: add common configuration validation pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 07/35] event/cnxk: add platform specific device config pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 08/35] event/cnxk: add event queue config functions pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 09/35] event/cnxk: allocate event inflight buffers pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 10/35] event/cnxk: add devargs for inflight buffer count pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 11/35] event/cnxk: add devargs to control SSO HWGRP QoS pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 12/35] event/cnxk: add port config functions pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 13/35] event/cnxk: add event port link and unlink pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 14/35] event/cnxk: add devargs to configure getwork mode pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 15/35] event/cnxk: add SSO HW device operations pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 16/35] event/cnxk: add SSO GWS fastpath enqueue functions pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 17/35] event/cnxk: add SSO GWS dequeue fastpath functions pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 18/35] event/cnxk: add device start function pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 19/35] event/cnxk: add device stop and close functions pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 20/35] event/cnxk: add SSO selftest and dump pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 21/35] event/cnxk: add event port and queue xstats pbhagavatula
2021-05-04  9:51           ` Kinsella, Ray
2021-05-04 10:08             ` Jerin Jacob
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 22/35] event/cnxk: support event timer pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 23/35] event/cnxk: add timer adapter capabilities pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 24/35] event/cnxk: create and free timer adapter pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 25/35] event/cnxk: add devargs to disable NPA pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 26/35] event/cnxk: allow adapters to resize inflights pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 27/35] event/cnxk: add timer adapter info function pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 28/35] event/cnxk: add devargs for chunk size and rings pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 29/35] event/cnxk: add TIM bucket operations pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 30/35] event/cnxk: add timer arm routine pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 31/35] event/cnxk: add timer arm timeout burst pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 32/35] event/cnxk: add timer cancel function pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 33/35] event/cnxk: add timer stats get and reset pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 34/35] event/cnxk: add timer adapter start and stop pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 35/35] event/cnxk: add devargs to control timer adapters pbhagavatula
2021-05-04  8:30         ` [dpdk-dev] [PATCH v5 00/35] Marvell CNXK Event device Driver Jerin Jacob
