DPDK patches and discussions
* [PATCH 1/2] event/cnxk: add additional checks in OP_RELEASE
@ 2022-04-26 21:47 Pavan Nikhilesh
  2022-04-26 21:47 ` [PATCH 2/2] event/cnxk: move post-processing to separate function Pavan Nikhilesh
  2022-04-27 10:58 ` [PATCH v2 1/2] event/cnxk: add additional checks in OP_RELEASE Pavan Nikhilesh
  0 siblings, 2 replies; 5+ messages in thread
From: Pavan Nikhilesh @ 2022-04-26 21:47 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

Add additional checks while performing RTE_EVENT_OP_RELEASE to
ensure that there are no pending SWTAGs or FLUSHes in flight.
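
For context, a minimal application-side sketch of the path this change
hardens (public rte_eventdev API, not part of the patch; device and port
setup are assumed to exist already):

  #include <rte_eventdev.h>
  #include <rte_pause.h>

  /* Drop a previously dequeued event: enqueueing it back with
   * op == RTE_EVENT_OP_RELEASE releases the scheduling context and
   * ends up in the PMD's *_sso_hws_enq() RELEASE case modified below. */
  static inline void
  drop_event(uint8_t dev_id, uint8_t port_id, const struct rte_event *in)
  {
  	struct rte_event rel = *in;

  	rel.op = RTE_EVENT_OP_RELEASE;
  	while (rte_event_enqueue_burst(dev_id, port_id, &rel, 1) != 1)
  		rte_pause();
  }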

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c |  4 +---
 drivers/event/cnxk/cn10k_worker.c   |  8 ++++++--
 drivers/event/cnxk/cn9k_eventdev.c  |  4 +---
 drivers/event/cnxk/cn9k_worker.c    | 16 ++++++++++++----
 drivers/event/cnxk/cn9k_worker.h    |  3 +--
 drivers/event/cnxk/cnxk_worker.h    | 17 ++++++++++++++---
 6 files changed, 35 insertions(+), 17 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 9b4d2895ec..2fa2cd31c2 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -137,9 +137,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
-			cnxk_sso_hws_swtag_flush(
-				ws->base + SSOW_LF_GWS_WQE0,
-				ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+			cnxk_sso_hws_swtag_flush(ws->base);
 		do {
 			val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
 		} while (val & BIT_ULL(56));
diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index 975a22336a..0d99b4c5e5 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -18,8 +18,12 @@ cn10k_sso_hws_enq(void *port, const struct rte_event *ev)
 		cn10k_sso_hws_forward_event(ws, ev);
 		break;
 	case RTE_EVENT_OP_RELEASE:
-		cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_WQE0,
-					 ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+		if (ws->swtag_req) {
+			cnxk_sso_hws_desched(ev->u64, ws->base);
+			ws->swtag_req = 0;
+			break;
+		}
+		cnxk_sso_hws_swtag_flush(ws->base);
 		break;
 	default:
 		return 0;
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 4bba477dd1..41bbe3cb22 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -156,9 +156,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
-			cnxk_sso_hws_swtag_flush(
-				ws_base + SSOW_LF_GWS_TAG,
-				ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+			cnxk_sso_hws_swtag_flush(ws_base);
 		do {
 			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
 		} while (val & BIT_ULL(56));
diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c
index a981bc986f..41dbe6cafb 100644
--- a/drivers/event/cnxk/cn9k_worker.c
+++ b/drivers/event/cnxk/cn9k_worker.c
@@ -19,8 +19,12 @@ cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
 		cn9k_sso_hws_forward_event(ws, ev);
 		break;
 	case RTE_EVENT_OP_RELEASE:
-		cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
-					 ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+		if (ws->swtag_req) {
+			cnxk_sso_hws_desched(ev->u64, ws->base);
+			ws->swtag_req = 0;
+			break;
+		}
+		cnxk_sso_hws_swtag_flush(ws->base);
 		break;
 	default:
 		return 0;
@@ -78,8 +82,12 @@ cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
 		cn9k_sso_hws_dual_forward_event(dws, base, ev);
 		break;
 	case RTE_EVENT_OP_RELEASE:
-		cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
-					 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+		if (dws->swtag_req) {
+			cnxk_sso_hws_desched(ev->u64, base);
+			dws->swtag_req = 0;
+			break;
+		}
+		cnxk_sso_hws_swtag_flush(base);
 		break;
 	default:
 		return 0;
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 917d1e0b40..88eb4e9cf9 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -841,8 +841,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 			return 1;
 	}
 
-	cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
-				 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+	cnxk_sso_hws_swtag_flush(base);
 
 	return 1;
 }
diff --git a/drivers/event/cnxk/cnxk_worker.h b/drivers/event/cnxk/cnxk_worker.h
index 7de03f3fbb..5e5e96b1ec 100644
--- a/drivers/event/cnxk/cnxk_worker.h
+++ b/drivers/event/cnxk/cnxk_worker.h
@@ -45,11 +45,15 @@ cnxk_sso_hws_swtag_untag(uintptr_t swtag_untag_op)
 }
 
 static __rte_always_inline void
-cnxk_sso_hws_swtag_flush(uint64_t tag_op, uint64_t flush_op)
+cnxk_sso_hws_swtag_flush(uint64_t base)
 {
-	if (CNXK_TT_FROM_TAG(plt_read64(tag_op)) == SSO_TT_EMPTY)
+	/* Ensure that no previous flush is pending. */
+	while (plt_read64(base + SSOW_LF_GWS_PENDSTATE) & BIT_ULL(56))
+		;
+	if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG)) ==
+	    SSO_TT_EMPTY)
 		return;
-	plt_write64(0, flush_op);
+	plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
 }
 
 static __rte_always_inline uint64_t
@@ -78,4 +82,11 @@ cnxk_sso_hws_swtag_wait(uintptr_t tag_op)
 	return swtp;
 }
 
+static __rte_always_inline void
+cnxk_sso_hws_desched(uint64_t u64, uint64_t base)
+{
+	plt_write64(u64, base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
+	plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+}
+
 #endif
-- 
2.35.1



* [PATCH 2/2] event/cnxk: move post-processing to separate function
  2022-04-26 21:47 [PATCH 1/2] event/cnxk: add additional checks in OP_RELEASE Pavan Nikhilesh
@ 2022-04-26 21:47 ` Pavan Nikhilesh
  2022-05-16 15:21   ` Jerin Jacob
  2022-04-27 10:58 ` [PATCH v2 1/2] event/cnxk: add additional checks in OP_RELEASE Pavan Nikhilesh
  1 sibling, 1 reply; 5+ messages in thread
From: Pavan Nikhilesh @ 2022-04-26 21:47 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

Move event post-processing to a separate function.
Do complete event post-processing in the tear-down functions as well,
to prevent incorrect memory frees.
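
To make the motivation concrete, here is a hypothetical flush callback
(illustrative only, not part of this series) of the kind the tear-down
loops below hand events to; freeing ev.mbuf is only safe because the
driver now runs full post-processing, converting the raw WQE pointer
into an mbuf pointer, before invoking the callback:

  #include <rte_eventdev.h>
  #include <rte_mbuf.h>

  /* Matches the fn(arg, ev) shape used by the cnxk flush helpers. */
  static void
  flush_drop_cb(void *arg, struct rte_event ev)
  {
  	(void)arg;
  	/* Valid only after the PMD post-processed the event; otherwise
  	 * ev.u64 would still hold a WQE address, not an mbuf. */
  	if (ev.event_type == RTE_EVENT_TYPE_ETHDEV)
  		rte_pktmbuf_free(ev.mbuf);
  }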

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c |   5 +-
 drivers/event/cnxk/cn10k_worker.h   | 190 +++++++++++++---------------
 drivers/event/cnxk/cn9k_eventdev.c  |   9 +-
 drivers/event/cnxk/cn9k_worker.h    | 114 ++++++-----------
 4 files changed, 138 insertions(+), 180 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 2fa2cd31c2..94829e789c 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -133,7 +133,10 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 
 	while (aq_cnt || cq_ds_cnt || ds_cnt) {
 		plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
-		cn10k_sso_hws_get_work_empty(ws, &ev);
+		cn10k_sso_hws_get_work_empty(
+			ws, &ev,
+			(NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+				NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index c96048f47d..03bae4bd53 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -196,15 +196,88 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	}
 }
 
+static __rte_always_inline void
+cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
+			   const uint32_t flags)
+{
+	uint64_t tstamp_ptr;
+
+	u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
+		 (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
+	if ((flags & CPT_RX_WQE_F) &&
+	    (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
+		u64[1] = cn10k_cpt_crypto_adapter_dequeue(u64[1]);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
+		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
+		uint64_t mbuf;
+
+		mbuf = u64[1] - sizeof(struct rte_mbuf);
+		rte_prefetch0((void *)mbuf);
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			const uint64_t mbuf_init =
+				0x100010000ULL | RTE_PKTMBUF_HEADROOM |
+				(flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+			struct rte_mbuf *m;
+			uintptr_t sa_base;
+			uint64_t iova = 0;
+			uint8_t loff = 0;
+			uint16_t d_off;
+			uint64_t cq_w1;
+			uint64_t cq_w5;
+
+			m = (struct rte_mbuf *)mbuf;
+			d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
+			d_off += RTE_PKTMBUF_HEADROOM;
+
+			cq_w1 = *(uint64_t *)(u64[1] + 8);
+			cq_w5 = *(uint64_t *)(u64[1] + 40);
+
+			sa_base = cnxk_nix_sa_base_get(port, ws->lookup_mem);
+			sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+
+			mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
+				cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
+				(struct rte_mbuf *)mbuf, d_off, flags,
+				mbuf_init | ((uint64_t)port) << 48);
+			if (loff)
+				roc_npa_aura_op_free(m->pool->pool_id, 0, iova);
+		}
+
+		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
+		cn10k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
+				  ws->lookup_mem);
+		/* Extracting tstamp, if PTP enabled*/
+		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
+					   CNXK_SSO_WQE_SG_PTR);
+		cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
+					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
+					 (uint64_t *)tstamp_ptr);
+		u64[1] = mbuf;
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
+		   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
+		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
+		__uint128_t vwqe_hdr = *(__uint128_t *)u64[1];
+
+		vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
+			   ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
+		*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
+		cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
+				   ws->tstamp, ws->lmt_base);
+		/* Mark vector mempool object as get */
+		RTE_MEMPOOL_CHECK_COOKIES(
+			rte_mempool_from_obj((void *)gw.u64[1]),
+			(void **)&u64[1], 1, 1);
+	}
+}
+
 static __rte_always_inline uint16_t
 cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
-		       const uint32_t flags, void *lookup_mem)
+		       const uint32_t flags)
 {
 	union {
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
-	uint64_t tstamp_ptr;
 
 	gw.get_work = ws->gw_wdata;
 #if defined(RTE_ARCH_ARM64) && !defined(__clang__)
@@ -222,83 +295,8 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
 	} while (gw.u64[0] & BIT_ULL(63));
 #endif
 	ws->gw_rdata = gw.u64[0];
-	if (gw.u64[1]) {
-		gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-			    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-			    (gw.u64[0] & 0xffffffff);
-		if ((flags & CPT_RX_WQE_F) &&
-		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		     RTE_EVENT_TYPE_CRYPTODEV)) {
-			gw.u64[1] = cn10k_cpt_crypto_adapter_dequeue(gw.u64[1]);
-		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-			   RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-			uint64_t mbuf;
-
-			mbuf = gw.u64[1] - sizeof(struct rte_mbuf);
-			rte_prefetch0((void *)mbuf);
-			if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
-				const uint64_t mbuf_init = 0x100010000ULL |
-					RTE_PKTMBUF_HEADROOM |
-					(flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
-				struct rte_mbuf *m;
-				uintptr_t sa_base;
-				uint64_t iova = 0;
-				uint8_t loff = 0;
-				uint16_t d_off;
-				uint64_t cq_w1;
-				uint64_t cq_w5;
-
-				m = (struct rte_mbuf *)mbuf;
-				d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
-				d_off += RTE_PKTMBUF_HEADROOM;
-
-				cq_w1 = *(uint64_t *)(gw.u64[1] + 8);
-				cq_w5 = *(uint64_t *)(gw.u64[1] + 40);
-
-				sa_base =
-					cnxk_nix_sa_base_get(port, lookup_mem);
-				sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
-
-				mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
-					cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
-					(struct rte_mbuf *)mbuf, d_off, flags,
-					mbuf_init | ((uint64_t)port) << 48);
-				if (loff)
-					roc_npa_aura_op_free(m->pool->pool_id,
-							     0, iova);
-			}
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					  gw.u64[0] & 0xFFFFF, flags,
-					  lookup_mem);
-			/* Extracting tstamp, if PTP enabled*/
-			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
-							    gw.u64[1]) +
-						   CNXK_SSO_WQE_SG_PTR);
-			cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
-						ws->tstamp,
-						flags & NIX_RX_OFFLOAD_TSTAMP_F,
-						(uint64_t *)tstamp_ptr);
-			gw.u64[1] = mbuf;
-		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-			   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-			__uint128_t vwqe_hdr = *(__uint128_t *)gw.u64[1];
-
-			vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
-				   ((vwqe_hdr & 0xFFFF) << 48) |
-				   ((uint64_t)port << 32);
-			*(uint64_t *)gw.u64[1] = (uint64_t)vwqe_hdr;
-			cn10k_process_vwqe(gw.u64[1], port, flags, lookup_mem,
-					   ws->tstamp, ws->lmt_base);
-			/* Mark vector mempool object as get */
-			RTE_MEMPOOL_CHECK_COOKIES(
-				rte_mempool_from_obj((void *)gw.u64[1]),
-				(void **)&gw.u64[1], 1, 1);
-		}
-	}
+	if (gw.u64[1])
+		cn10k_sso_hws_post_process(ws, gw.u64, flags);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
@@ -308,13 +306,13 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
 
 /* Used in cleaning up workslot. */
 static __rte_always_inline uint16_t
-cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
+cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev,
+			     const uint32_t flags)
 {
 	union {
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
-	uint64_t mbuf;
 
 #ifdef RTE_ARCH_ARM64
 	asm volatile(PLT_CPU_FEATURE_PREAMBLE
@@ -325,9 +323,7 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
 		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
 		     "		tbnz %[tag], 63, rty%=			\n"
 		     "done%=:	dmb ld					\n"
-		     "		sub %[mbuf], %[wqp], #0x80		\n"
-		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
-		       [mbuf] "=&r"(mbuf)
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
 		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
 		     : "memory");
 #else
@@ -335,24 +331,11 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
 		roc_load_pair(gw.u64[0], gw.u64[1],
 			      ws->base + SSOW_LF_GWS_WQE0);
 	} while (gw.u64[0] & BIT_ULL(63));
-	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
-	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-		    (gw.u64[0] & 0xffffffff);
-
-	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
-		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		    RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					  gw.u64[0] & 0xFFFFF, 0, NULL);
-			gw.u64[1] = mbuf;
-		}
-	}
+	ws->gw_rdata = gw.u64[0];
+	if (gw.u64[1])
+		cn10k_sso_hws_post_process(ws, gw.u64, flags);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
@@ -471,7 +454,7 @@ NIX_RX_FASTPATH_MODES
 				ws->base + SSOW_LF_GWS_WQE0);                  \
 			return 1;                                              \
 		}                                                              \
-		return cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);  \
+		return cn10k_sso_hws_get_work(ws, ev, flags);                  \
 	}
 
 #define SSO_DEQ_SEG(fn, flags)	  SSO_DEQ(fn, flags | NIX_RX_MULTI_SEG_F)
@@ -491,10 +474,9 @@ NIX_RX_FASTPATH_MODES
 				ws->base + SSOW_LF_GWS_WQE0);                  \
 			return ret;                                            \
 		}                                                              \
-		ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);   \
+		ret = cn10k_sso_hws_get_work(ws, ev, flags);                   \
 		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
-			ret = cn10k_sso_hws_get_work(ws, ev, flags,            \
-						     ws->lookup_mem);          \
+			ret = cn10k_sso_hws_get_work(ws, ev, flags);           \
 		return ret;                                                    \
 	}
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 41bbe3cb22..987888d3db 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -122,6 +122,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 			  cnxk_handle_event_t fn, void *arg)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
+	struct cnxk_timesync_info *tstamp;
 	struct cn9k_sso_hws_dual *dws;
 	struct cn9k_sso_hws *ws;
 	uint64_t cq_ds_cnt = 1;
@@ -130,6 +131,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 	struct rte_event ev;
 	uintptr_t ws_base;
 	uint64_t val, req;
+	void *lookup_mem;
 
 	plt_write64(0, base + SSO_LF_GGRP_QCTL);
 
@@ -145,14 +147,19 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 	if (dev->dual_ws) {
 		dws = hws;
 		ws_base = dws->base[0];
+		lookup_mem = dws->lookup_mem;
+		tstamp = dws->tstamp;
 	} else {
 		ws = hws;
 		ws_base = ws->base;
+		lookup_mem = ws->lookup_mem;
+		tstamp = ws->tstamp;
 	}
 
 	while (aq_cnt || cq_ds_cnt || ds_cnt) {
 		plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
-		cn9k_sso_hws_get_work_empty(ws_base, &ev);
+		cn9k_sso_hws_get_work_empty(ws_base, &ev, dev->rx_offloads,
+					    lookup_mem, tstamp);
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 88eb4e9cf9..64e97e321a 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -169,6 +169,34 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
 			     mbuf_init | ((uint64_t)port_id) << 48, flags);
 }
 
+static __rte_always_inline void
+cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
+			  const void *const lookup_mem,
+			  struct cnxk_timesync_info *tstamp)
+{
+	uint64_t tstamp_ptr;
+
+	u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
+		 (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
+	if ((flags & CPT_RX_WQE_F) &&
+	    (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
+		u64[1] = cn9k_cpt_crypto_adapter_dequeue(u64[1]);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
+		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
+
+		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
+		cn9k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
+				 lookup_mem);
+		/* Extracting tstamp, if PTP enabled*/
+		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
+					   CNXK_SSO_WQE_SG_PTR);
+		cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
+					flags & NIX_RX_OFFLOAD_TSTAMP_F,
+					(uint64_t *)tstamp_ptr);
+		u64[1] = mbuf;
+	}
+}
+
 static __rte_always_inline uint16_t
 cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
 			   struct rte_event *ev, const uint32_t flags,
@@ -178,7 +206,6 @@ cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
-	uint64_t tstamp_ptr;
 	uint64_t mbuf;
 
 	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
@@ -207,34 +234,9 @@ cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
-	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-		    (gw.u64[0] & 0xffffffff);
-
-	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
-		if ((flags & CPT_RX_WQE_F) &&
-		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		     RTE_EVENT_TYPE_CRYPTODEV)) {
-			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
-		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-			   RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					 gw.u64[0] & 0xFFFFF, flags,
-					 dws->lookup_mem);
-			/* Extracting tstamp, if PTP enabled*/
-			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
-							    gw.u64[1]) +
-						   CNXK_SSO_WQE_SG_PTR);
-			cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
-						dws->tstamp,
-						flags & NIX_RX_OFFLOAD_TSTAMP_F,
-						(uint64_t *)tstamp_ptr);
-			gw.u64[1] = mbuf;
-		}
-	}
+	if (gw.u64[1])
+		cn9k_sso_hws_post_process(gw.u64, mbuf, flags, dws->lookup_mem,
+					  dws->tstamp);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
@@ -250,7 +252,6 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
-	uint64_t tstamp_ptr;
 	uint64_t mbuf;
 
 	plt_write64(ws->gw_wdata, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
@@ -283,34 +284,9 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
-	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-		    (gw.u64[0] & 0xffffffff);
-
-	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
-		if ((flags & CPT_RX_WQE_F) &&
-		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		     RTE_EVENT_TYPE_CRYPTODEV)) {
-			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
-		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-			   RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					 gw.u64[0] & 0xFFFFF, flags,
-					 lookup_mem);
-			/* Extracting tstamp, if PTP enabled*/
-			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
-							    gw.u64[1]) +
-						   CNXK_SSO_WQE_SG_PTR);
-			cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
-						ws->tstamp,
-						flags & NIX_RX_OFFLOAD_TSTAMP_F,
-						(uint64_t *)tstamp_ptr);
-			gw.u64[1] = mbuf;
-		}
-	}
+	if (gw.u64[1])
+		cn9k_sso_hws_post_process(gw.u64, mbuf, flags, lookup_mem,
+					  ws->tstamp);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
@@ -320,7 +296,9 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 
 /* Used in cleaning up workslot. */
 static __rte_always_inline uint16_t
-cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
+cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev,
+			    const uint32_t flags, void *lookup_mem,
+			    struct cnxk_timesync_info *tstamp)
 {
 	union {
 		__uint128_t get_work;
@@ -353,21 +331,9 @@ cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
-	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-		    (gw.u64[0] & 0xffffffff);
-
-	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
-		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		    RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					 gw.u64[0] & 0xFFFFF, 0, NULL);
-			gw.u64[1] = mbuf;
-		}
-	}
+	if (gw.u64[1])
+		cn9k_sso_hws_post_process(gw.u64, mbuf, flags, lookup_mem,
+					  tstamp);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
-- 
2.35.1



* [PATCH v2 1/2] event/cnxk: add additional checks in OP_RELEASE
  2022-04-26 21:47 [PATCH 1/2] event/cnxk: add additional checks in OP_RELEASE Pavan Nikhilesh
  2022-04-26 21:47 ` [PATCH 2/2] event/cnxk: move post-processing to separate function Pavan Nikhilesh
@ 2022-04-27 10:58 ` Pavan Nikhilesh
  2022-04-27 10:58   ` [PATCH v2 2/2] event/cnxk: move post-processing to separate function Pavan Nikhilesh
  1 sibling, 1 reply; 5+ messages in thread
From: Pavan Nikhilesh @ 2022-04-27 10:58 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

Add additional checks while performing RTE_EVENT_OP_RELEASE to
ensure that there are no pending SWTAGs or FLUSHes in flight.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 v2 Changes:
 - Fix compilation with RTE_LIBRTE_MEMPOOL_DEBUG enabled.
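
The compilation issue is in patch 2/2 of the series: inside the new
cn10k_sso_hws_post_process() the vector-mempool cookie check referenced
gw.u64[1], which is out of scope there, and v2 switches it to u64[1].
It only broke debug builds because the cookie check is compiled out
unless RTE_LIBRTE_MEMPOOL_DEBUG is defined; conceptually (a sketch, the
exact definition lives in rte_mempool.h):

  #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
  #define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, free) \
  	rte_mempool_check_cookies(mp, obj_table, n, free)
  #else
  #define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, free) do {} while (0)
  #endif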

 drivers/event/cnxk/cn10k_eventdev.c |  4 +---
 drivers/event/cnxk/cn10k_worker.c   |  8 ++++++--
 drivers/event/cnxk/cn9k_eventdev.c  |  4 +---
 drivers/event/cnxk/cn9k_worker.c    | 16 ++++++++++++----
 drivers/event/cnxk/cn9k_worker.h    |  3 +--
 drivers/event/cnxk/cnxk_worker.h    | 17 ++++++++++++++---
 6 files changed, 35 insertions(+), 17 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 9b4d2895ec..2fa2cd31c2 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -137,9 +137,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
-			cnxk_sso_hws_swtag_flush(
-				ws->base + SSOW_LF_GWS_WQE0,
-				ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+			cnxk_sso_hws_swtag_flush(ws->base);
 		do {
 			val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
 		} while (val & BIT_ULL(56));
diff --git a/drivers/event/cnxk/cn10k_worker.c b/drivers/event/cnxk/cn10k_worker.c
index 975a22336a..0d99b4c5e5 100644
--- a/drivers/event/cnxk/cn10k_worker.c
+++ b/drivers/event/cnxk/cn10k_worker.c
@@ -18,8 +18,12 @@ cn10k_sso_hws_enq(void *port, const struct rte_event *ev)
 		cn10k_sso_hws_forward_event(ws, ev);
 		break;
 	case RTE_EVENT_OP_RELEASE:
-		cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_WQE0,
-					 ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+		if (ws->swtag_req) {
+			cnxk_sso_hws_desched(ev->u64, ws->base);
+			ws->swtag_req = 0;
+			break;
+		}
+		cnxk_sso_hws_swtag_flush(ws->base);
 		break;
 	default:
 		return 0;
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 4bba477dd1..41bbe3cb22 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -156,9 +156,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
-			cnxk_sso_hws_swtag_flush(
-				ws_base + SSOW_LF_GWS_TAG,
-				ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+			cnxk_sso_hws_swtag_flush(ws_base);
 		do {
 			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
 		} while (val & BIT_ULL(56));
diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c
index a981bc986f..41dbe6cafb 100644
--- a/drivers/event/cnxk/cn9k_worker.c
+++ b/drivers/event/cnxk/cn9k_worker.c
@@ -19,8 +19,12 @@ cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
 		cn9k_sso_hws_forward_event(ws, ev);
 		break;
 	case RTE_EVENT_OP_RELEASE:
-		cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
-					 ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+		if (ws->swtag_req) {
+			cnxk_sso_hws_desched(ev->u64, ws->base);
+			ws->swtag_req = 0;
+			break;
+		}
+		cnxk_sso_hws_swtag_flush(ws->base);
 		break;
 	default:
 		return 0;
@@ -78,8 +82,12 @@ cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
 		cn9k_sso_hws_dual_forward_event(dws, base, ev);
 		break;
 	case RTE_EVENT_OP_RELEASE:
-		cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
-					 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+		if (dws->swtag_req) {
+			cnxk_sso_hws_desched(ev->u64, base);
+			dws->swtag_req = 0;
+			break;
+		}
+		cnxk_sso_hws_swtag_flush(base);
 		break;
 	default:
 		return 0;
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 917d1e0b40..88eb4e9cf9 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -841,8 +841,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 			return 1;
 	}

-	cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
-				 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+	cnxk_sso_hws_swtag_flush(base);

 	return 1;
 }
diff --git a/drivers/event/cnxk/cnxk_worker.h b/drivers/event/cnxk/cnxk_worker.h
index 7de03f3fbb..5e5e96b1ec 100644
--- a/drivers/event/cnxk/cnxk_worker.h
+++ b/drivers/event/cnxk/cnxk_worker.h
@@ -45,11 +45,15 @@ cnxk_sso_hws_swtag_untag(uintptr_t swtag_untag_op)
 }

 static __rte_always_inline void
-cnxk_sso_hws_swtag_flush(uint64_t tag_op, uint64_t flush_op)
+cnxk_sso_hws_swtag_flush(uint64_t base)
 {
-	if (CNXK_TT_FROM_TAG(plt_read64(tag_op)) == SSO_TT_EMPTY)
+	/* Ensure that no previous flush is pending. */
+	while (plt_read64(base + SSOW_LF_GWS_PENDSTATE) & BIT_ULL(56))
+		;
+	if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG)) ==
+	    SSO_TT_EMPTY)
 		return;
-	plt_write64(0, flush_op);
+	plt_write64(0, base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
 }

 static __rte_always_inline uint64_t
@@ -78,4 +82,11 @@ cnxk_sso_hws_swtag_wait(uintptr_t tag_op)
 	return swtp;
 }

+static __rte_always_inline void
+cnxk_sso_hws_desched(uint64_t u64, uint64_t base)
+{
+	plt_write64(u64, base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
+	plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+}
+
 #endif
--
2.25.1



* [PATCH v2 2/2] event/cnxk: move post-processing to separate function
  2022-04-27 10:58 ` [PATCH v2 1/2] event/cnxk: add additional checks in OP_RELEASE Pavan Nikhilesh
@ 2022-04-27 10:58   ` Pavan Nikhilesh
  0 siblings, 0 replies; 5+ messages in thread
From: Pavan Nikhilesh @ 2022-04-27 10:58 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

Move event post-processing to a separate function.
Do complete event post-processing in the tear-down functions as well,
to prevent incorrect memory frees.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c |   5 +-
 drivers/event/cnxk/cn10k_worker.h   | 189 +++++++++++++---------------
 drivers/event/cnxk/cn9k_eventdev.c  |   9 +-
 drivers/event/cnxk/cn9k_worker.h    | 114 ++++++-----------
 4 files changed, 137 insertions(+), 180 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 2fa2cd31c2..94829e789c 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -133,7 +133,10 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 
 	while (aq_cnt || cq_ds_cnt || ds_cnt) {
 		plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
-		cn10k_sso_hws_get_work_empty(ws, &ev);
+		cn10k_sso_hws_get_work_empty(
+			ws, &ev,
+			(NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+				NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index c96048f47d..034f508dd8 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -196,15 +196,87 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	}
 }
 
+static __rte_always_inline void
+cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
+			   const uint32_t flags)
+{
+	uint64_t tstamp_ptr;
+
+	u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
+		 (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
+	if ((flags & CPT_RX_WQE_F) &&
+	    (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
+		u64[1] = cn10k_cpt_crypto_adapter_dequeue(u64[1]);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
+		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
+		uint64_t mbuf;
+
+		mbuf = u64[1] - sizeof(struct rte_mbuf);
+		rte_prefetch0((void *)mbuf);
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			const uint64_t mbuf_init =
+				0x100010000ULL | RTE_PKTMBUF_HEADROOM |
+				(flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+			struct rte_mbuf *m;
+			uintptr_t sa_base;
+			uint64_t iova = 0;
+			uint8_t loff = 0;
+			uint16_t d_off;
+			uint64_t cq_w1;
+			uint64_t cq_w5;
+
+			m = (struct rte_mbuf *)mbuf;
+			d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
+			d_off += RTE_PKTMBUF_HEADROOM;
+
+			cq_w1 = *(uint64_t *)(u64[1] + 8);
+			cq_w5 = *(uint64_t *)(u64[1] + 40);
+
+			sa_base = cnxk_nix_sa_base_get(port, ws->lookup_mem);
+			sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+
+			mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
+				cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
+				(struct rte_mbuf *)mbuf, d_off, flags,
+				mbuf_init | ((uint64_t)port) << 48);
+			if (loff)
+				roc_npa_aura_op_free(m->pool->pool_id, 0, iova);
+		}
+
+		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
+		cn10k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
+				  ws->lookup_mem);
+		/* Extracting tstamp, if PTP enabled*/
+		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
+					   CNXK_SSO_WQE_SG_PTR);
+		cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
+					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
+					 (uint64_t *)tstamp_ptr);
+		u64[1] = mbuf;
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
+		   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
+		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
+		__uint128_t vwqe_hdr = *(__uint128_t *)u64[1];
+
+		vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
+			   ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
+		*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
+		cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
+				   ws->tstamp, ws->lmt_base);
+		/* Mark vector mempool object as get */
+		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
+					  (void **)&u64[1], 1, 1);
+	}
+}
+
 static __rte_always_inline uint16_t
 cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
-		       const uint32_t flags, void *lookup_mem)
+		       const uint32_t flags)
 {
 	union {
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
-	uint64_t tstamp_ptr;
 
 	gw.get_work = ws->gw_wdata;
 #if defined(RTE_ARCH_ARM64) && !defined(__clang__)
@@ -222,83 +294,8 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
 	} while (gw.u64[0] & BIT_ULL(63));
 #endif
 	ws->gw_rdata = gw.u64[0];
-	if (gw.u64[1]) {
-		gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-			    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-			    (gw.u64[0] & 0xffffffff);
-		if ((flags & CPT_RX_WQE_F) &&
-		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		     RTE_EVENT_TYPE_CRYPTODEV)) {
-			gw.u64[1] = cn10k_cpt_crypto_adapter_dequeue(gw.u64[1]);
-		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-			   RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-			uint64_t mbuf;
-
-			mbuf = gw.u64[1] - sizeof(struct rte_mbuf);
-			rte_prefetch0((void *)mbuf);
-			if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
-				const uint64_t mbuf_init = 0x100010000ULL |
-					RTE_PKTMBUF_HEADROOM |
-					(flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
-				struct rte_mbuf *m;
-				uintptr_t sa_base;
-				uint64_t iova = 0;
-				uint8_t loff = 0;
-				uint16_t d_off;
-				uint64_t cq_w1;
-				uint64_t cq_w5;
-
-				m = (struct rte_mbuf *)mbuf;
-				d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
-				d_off += RTE_PKTMBUF_HEADROOM;
-
-				cq_w1 = *(uint64_t *)(gw.u64[1] + 8);
-				cq_w5 = *(uint64_t *)(gw.u64[1] + 40);
-
-				sa_base =
-					cnxk_nix_sa_base_get(port, lookup_mem);
-				sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
-
-				mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
-					cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
-					(struct rte_mbuf *)mbuf, d_off, flags,
-					mbuf_init | ((uint64_t)port) << 48);
-				if (loff)
-					roc_npa_aura_op_free(m->pool->pool_id,
-							     0, iova);
-			}
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					  gw.u64[0] & 0xFFFFF, flags,
-					  lookup_mem);
-			/* Extracting tstamp, if PTP enabled*/
-			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
-							    gw.u64[1]) +
-						   CNXK_SSO_WQE_SG_PTR);
-			cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
-						ws->tstamp,
-						flags & NIX_RX_OFFLOAD_TSTAMP_F,
-						(uint64_t *)tstamp_ptr);
-			gw.u64[1] = mbuf;
-		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-			   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-			__uint128_t vwqe_hdr = *(__uint128_t *)gw.u64[1];
-
-			vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
-				   ((vwqe_hdr & 0xFFFF) << 48) |
-				   ((uint64_t)port << 32);
-			*(uint64_t *)gw.u64[1] = (uint64_t)vwqe_hdr;
-			cn10k_process_vwqe(gw.u64[1], port, flags, lookup_mem,
-					   ws->tstamp, ws->lmt_base);
-			/* Mark vector mempool object as get */
-			RTE_MEMPOOL_CHECK_COOKIES(
-				rte_mempool_from_obj((void *)gw.u64[1]),
-				(void **)&gw.u64[1], 1, 1);
-		}
-	}
+	if (gw.u64[1])
+		cn10k_sso_hws_post_process(ws, gw.u64, flags);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
@@ -308,13 +305,13 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
 
 /* Used in cleaning up workslot. */
 static __rte_always_inline uint16_t
-cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
+cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev,
+			     const uint32_t flags)
 {
 	union {
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
-	uint64_t mbuf;
 
 #ifdef RTE_ARCH_ARM64
 	asm volatile(PLT_CPU_FEATURE_PREAMBLE
@@ -325,9 +322,7 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
 		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
 		     "		tbnz %[tag], 63, rty%=			\n"
 		     "done%=:	dmb ld					\n"
-		     "		sub %[mbuf], %[wqp], #0x80		\n"
-		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
-		       [mbuf] "=&r"(mbuf)
+		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
 		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
 		     : "memory");
 #else
@@ -335,24 +330,11 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
 		roc_load_pair(gw.u64[0], gw.u64[1],
 			      ws->base + SSOW_LF_GWS_WQE0);
 	} while (gw.u64[0] & BIT_ULL(63));
-	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
-	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-		    (gw.u64[0] & 0xffffffff);
-
-	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
-		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		    RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					  gw.u64[0] & 0xFFFFF, 0, NULL);
-			gw.u64[1] = mbuf;
-		}
-	}
+	ws->gw_rdata = gw.u64[0];
+	if (gw.u64[1])
+		cn10k_sso_hws_post_process(ws, gw.u64, flags);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
@@ -471,7 +453,7 @@ NIX_RX_FASTPATH_MODES
 				ws->base + SSOW_LF_GWS_WQE0);                  \
 			return 1;                                              \
 		}                                                              \
-		return cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);  \
+		return cn10k_sso_hws_get_work(ws, ev, flags);                  \
 	}
 
 #define SSO_DEQ_SEG(fn, flags)	  SSO_DEQ(fn, flags | NIX_RX_MULTI_SEG_F)
@@ -491,10 +473,9 @@ NIX_RX_FASTPATH_MODES
 				ws->base + SSOW_LF_GWS_WQE0);                  \
 			return ret;                                            \
 		}                                                              \
-		ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);   \
+		ret = cn10k_sso_hws_get_work(ws, ev, flags);                   \
 		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
-			ret = cn10k_sso_hws_get_work(ws, ev, flags,            \
-						     ws->lookup_mem);          \
+			ret = cn10k_sso_hws_get_work(ws, ev, flags);           \
 		return ret;                                                    \
 	}
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 41bbe3cb22..987888d3db 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -122,6 +122,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 			  cnxk_handle_event_t fn, void *arg)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
+	struct cnxk_timesync_info *tstamp;
 	struct cn9k_sso_hws_dual *dws;
 	struct cn9k_sso_hws *ws;
 	uint64_t cq_ds_cnt = 1;
@@ -130,6 +131,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 	struct rte_event ev;
 	uintptr_t ws_base;
 	uint64_t val, req;
+	void *lookup_mem;
 
 	plt_write64(0, base + SSO_LF_GGRP_QCTL);
 
@@ -145,14 +147,19 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 	if (dev->dual_ws) {
 		dws = hws;
 		ws_base = dws->base[0];
+		lookup_mem = dws->lookup_mem;
+		tstamp = dws->tstamp;
 	} else {
 		ws = hws;
 		ws_base = ws->base;
+		lookup_mem = ws->lookup_mem;
+		tstamp = ws->tstamp;
 	}
 
 	while (aq_cnt || cq_ds_cnt || ds_cnt) {
 		plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
-		cn9k_sso_hws_get_work_empty(ws_base, &ev);
+		cn9k_sso_hws_get_work_empty(ws_base, &ev, dev->rx_offloads,
+					    lookup_mem, tstamp);
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 88eb4e9cf9..64e97e321a 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -169,6 +169,34 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
 			     mbuf_init | ((uint64_t)port_id) << 48, flags);
 }
 
+static __rte_always_inline void
+cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
+			  const void *const lookup_mem,
+			  struct cnxk_timesync_info *tstamp)
+{
+	uint64_t tstamp_ptr;
+
+	u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
+		 (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
+	if ((flags & CPT_RX_WQE_F) &&
+	    (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
+		u64[1] = cn9k_cpt_crypto_adapter_dequeue(u64[1]);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
+		uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
+
+		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
+		cn9k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
+				 lookup_mem);
+		/* Extracting tstamp, if PTP enabled*/
+		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
+					   CNXK_SSO_WQE_SG_PTR);
+		cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
+					flags & NIX_RX_OFFLOAD_TSTAMP_F,
+					(uint64_t *)tstamp_ptr);
+		u64[1] = mbuf;
+	}
+}
+
 static __rte_always_inline uint16_t
 cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
 			   struct rte_event *ev, const uint32_t flags,
@@ -178,7 +206,6 @@ cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
-	uint64_t tstamp_ptr;
 	uint64_t mbuf;
 
 	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
@@ -207,34 +234,9 @@ cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
-	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-		    (gw.u64[0] & 0xffffffff);
-
-	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
-		if ((flags & CPT_RX_WQE_F) &&
-		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		     RTE_EVENT_TYPE_CRYPTODEV)) {
-			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
-		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-			   RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					 gw.u64[0] & 0xFFFFF, flags,
-					 dws->lookup_mem);
-			/* Extracting tstamp, if PTP enabled*/
-			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
-							    gw.u64[1]) +
-						   CNXK_SSO_WQE_SG_PTR);
-			cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
-						dws->tstamp,
-						flags & NIX_RX_OFFLOAD_TSTAMP_F,
-						(uint64_t *)tstamp_ptr);
-			gw.u64[1] = mbuf;
-		}
-	}
+	if (gw.u64[1])
+		cn9k_sso_hws_post_process(gw.u64, mbuf, flags, dws->lookup_mem,
+					  dws->tstamp);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
@@ -250,7 +252,6 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 		__uint128_t get_work;
 		uint64_t u64[2];
 	} gw;
-	uint64_t tstamp_ptr;
 	uint64_t mbuf;
 
 	plt_write64(ws->gw_wdata, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
@@ -283,34 +284,9 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
-	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-		    (gw.u64[0] & 0xffffffff);
-
-	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
-		if ((flags & CPT_RX_WQE_F) &&
-		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		     RTE_EVENT_TYPE_CRYPTODEV)) {
-			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
-		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-			   RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					 gw.u64[0] & 0xFFFFF, flags,
-					 lookup_mem);
-			/* Extracting tstamp, if PTP enabled*/
-			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
-							    gw.u64[1]) +
-						   CNXK_SSO_WQE_SG_PTR);
-			cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
-						ws->tstamp,
-						flags & NIX_RX_OFFLOAD_TSTAMP_F,
-						(uint64_t *)tstamp_ptr);
-			gw.u64[1] = mbuf;
-		}
-	}
+	if (gw.u64[1])
+		cn9k_sso_hws_post_process(gw.u64, mbuf, flags, lookup_mem,
+					  ws->tstamp);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
@@ -320,7 +296,9 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 
 /* Used in cleaning up workslot. */
 static __rte_always_inline uint16_t
-cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
+cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev,
+			    const uint32_t flags, void *lookup_mem,
+			    struct cnxk_timesync_info *tstamp)
 {
 	union {
 		__uint128_t get_work;
@@ -353,21 +331,9 @@ cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
-	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
-		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
-		    (gw.u64[0] & 0xffffffff);
-
-	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
-		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
-		    RTE_EVENT_TYPE_ETHDEV) {
-			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
-
-			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
-			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
-					 gw.u64[0] & 0xFFFFF, 0, NULL);
-			gw.u64[1] = mbuf;
-		}
-	}
+	if (gw.u64[1])
+		cn9k_sso_hws_post_process(gw.u64, mbuf, flags, lookup_mem,
+					  tstamp);
 
 	ev->event = gw.u64[0];
 	ev->u64 = gw.u64[1];
-- 
2.25.1



* Re: [PATCH 2/2] event/cnxk: move post-processing to separate function
  2022-04-26 21:47 ` [PATCH 2/2] event/cnxk: move post-processing to separate function Pavan Nikhilesh
@ 2022-05-16 15:21   ` Jerin Jacob
  0 siblings, 0 replies; 5+ messages in thread
From: Jerin Jacob @ 2022-05-16 15:21 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: Jerin Jacob, Shijith Thotton, dpdk-dev

On Wed, Apr 27, 2022 at 3:17 AM Pavan Nikhilesh
<pbhagavatula@marvell.com> wrote:
>
> Move event post-processing to a separate function.
> Do complete event post-processing in tear-down functions to prevent
> incorrect memory free.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> ---
>  drivers/event/cnxk/cn10k_eventdev.c |   5 +-
>  drivers/event/cnxk/cn10k_worker.h   | 190 +++++++++++++---------------
>  drivers/event/cnxk/cn9k_eventdev.c  |   9 +-
>  drivers/event/cnxk/cn9k_worker.h    | 114 ++++++-----------
>  4 files changed, 138 insertions(+), 180 deletions(-)


Series applied to dpdk-next-net-eventdev/for-main. Thanks


> diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> index 2fa2cd31c2..94829e789c 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -133,7 +133,10 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>
>         while (aq_cnt || cq_ds_cnt || ds_cnt) {
>                 plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
> -               cn10k_sso_hws_get_work_empty(ws, &ev);
> +               cn10k_sso_hws_get_work_empty(
> +                       ws, &ev,
> +                       (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
> +                               NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
>                 if (fn != NULL && ev.u64 != 0)
>                         fn(arg, ev);
>                 if (ev.sched_type != SSO_TT_EMPTY)
> diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
> index c96048f47d..03bae4bd53 100644
> --- a/drivers/event/cnxk/cn10k_worker.h
> +++ b/drivers/event/cnxk/cn10k_worker.h
> @@ -196,15 +196,88 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
>         }
>  }
>
> +static __rte_always_inline void
> +cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
> +                          const uint32_t flags)
> +{
> +       uint64_t tstamp_ptr;
> +
> +       u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
> +                (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
> +       if ((flags & CPT_RX_WQE_F) &&
> +           (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
> +               u64[1] = cn10k_cpt_crypto_adapter_dequeue(u64[1]);
> +       } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
> +               uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
> +               uint64_t mbuf;
> +
> +               mbuf = u64[1] - sizeof(struct rte_mbuf);
> +               rte_prefetch0((void *)mbuf);
> +               if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
> +                       const uint64_t mbuf_init =
> +                               0x100010000ULL | RTE_PKTMBUF_HEADROOM |
> +                               (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
> +                       struct rte_mbuf *m;
> +                       uintptr_t sa_base;
> +                       uint64_t iova = 0;
> +                       uint8_t loff = 0;
> +                       uint16_t d_off;
> +                       uint64_t cq_w1;
> +                       uint64_t cq_w5;
> +
> +                       m = (struct rte_mbuf *)mbuf;
> +                       d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
> +                       d_off += RTE_PKTMBUF_HEADROOM;
> +
> +                       cq_w1 = *(uint64_t *)(u64[1] + 8);
> +                       cq_w5 = *(uint64_t *)(u64[1] + 40);
> +
> +                       sa_base = cnxk_nix_sa_base_get(port, ws->lookup_mem);
> +                       sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
> +
> +                       mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
> +                               cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
> +                               (struct rte_mbuf *)mbuf, d_off, flags,
> +                               mbuf_init | ((uint64_t)port) << 48);
> +                       if (loff)
> +                               roc_npa_aura_op_free(m->pool->pool_id, 0, iova);
> +               }
> +
> +               u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
> +               cn10k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
> +                                 ws->lookup_mem);
> +               /* Extracting tstamp, if PTP enabled*/
> +               tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
> +                                          CNXK_SSO_WQE_SG_PTR);
> +               cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
> +                                        flags & NIX_RX_OFFLOAD_TSTAMP_F,
> +                                        (uint64_t *)tstamp_ptr);
> +               u64[1] = mbuf;
> +       } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
> +                  RTE_EVENT_TYPE_ETHDEV_VECTOR) {
> +               uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
> +               __uint128_t vwqe_hdr = *(__uint128_t *)u64[1];
> +
> +               vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
> +                          ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
> +               *(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
> +               cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
> +                                  ws->tstamp, ws->lmt_base);
> +               /* Mark vector mempool object as get */
> +               RTE_MEMPOOL_CHECK_COOKIES(
> +                       rte_mempool_from_obj((void *)gw.u64[1]),
> +                       (void **)&u64[1], 1, 1);
> +       }
> +}
> +
>  static __rte_always_inline uint16_t
>  cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
> -                      const uint32_t flags, void *lookup_mem)
> +                      const uint32_t flags)
>  {
>         union {
>                 __uint128_t get_work;
>                 uint64_t u64[2];
>         } gw;
> -       uint64_t tstamp_ptr;
>
>         gw.get_work = ws->gw_wdata;
>  #if defined(RTE_ARCH_ARM64) && !defined(__clang__)
> @@ -222,83 +295,8 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
>         } while (gw.u64[0] & BIT_ULL(63));
>  #endif
>         ws->gw_rdata = gw.u64[0];
> -       if (gw.u64[1]) {
> -               gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
> -                           (gw.u64[0] & (0x3FFull << 36)) << 4 |
> -                           (gw.u64[0] & 0xffffffff);
> -               if ((flags & CPT_RX_WQE_F) &&
> -                   (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                    RTE_EVENT_TYPE_CRYPTODEV)) {
> -                       gw.u64[1] = cn10k_cpt_crypto_adapter_dequeue(gw.u64[1]);
> -               } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                          RTE_EVENT_TYPE_ETHDEV) {
> -                       uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
> -                       uint64_t mbuf;
> -
> -                       mbuf = gw.u64[1] - sizeof(struct rte_mbuf);
> -                       rte_prefetch0((void *)mbuf);
> -                       if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
> -                               const uint64_t mbuf_init = 0x100010000ULL |
> -                                       RTE_PKTMBUF_HEADROOM |
> -                                       (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
> -                               struct rte_mbuf *m;
> -                               uintptr_t sa_base;
> -                               uint64_t iova = 0;
> -                               uint8_t loff = 0;
> -                               uint16_t d_off;
> -                               uint64_t cq_w1;
> -                               uint64_t cq_w5;
> -
> -                               m = (struct rte_mbuf *)mbuf;
> -                               d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
> -                               d_off += RTE_PKTMBUF_HEADROOM;
> -
> -                               cq_w1 = *(uint64_t *)(gw.u64[1] + 8);
> -                               cq_w5 = *(uint64_t *)(gw.u64[1] + 40);
> -
> -                               sa_base =
> -                                       cnxk_nix_sa_base_get(port, lookup_mem);
> -                               sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
> -
> -                               mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
> -                                       cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
> -                                       (struct rte_mbuf *)mbuf, d_off, flags,
> -                                       mbuf_init | ((uint64_t)port) << 48);
> -                               if (loff)
> -                                       roc_npa_aura_op_free(m->pool->pool_id,
> -                                                            0, iova);
> -                       }
> -
> -                       gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
> -                       cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
> -                                         gw.u64[0] & 0xFFFFF, flags,
> -                                         lookup_mem);
> -                       /* Extracting tstamp, if PTP enabled*/
> -                       tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
> -                                                           gw.u64[1]) +
> -                                                  CNXK_SSO_WQE_SG_PTR);
> -                       cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
> -                                               ws->tstamp,
> -                                               flags & NIX_RX_OFFLOAD_TSTAMP_F,
> -                                               (uint64_t *)tstamp_ptr);
> -                       gw.u64[1] = mbuf;
> -               } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                          RTE_EVENT_TYPE_ETHDEV_VECTOR) {
> -                       uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
> -                       __uint128_t vwqe_hdr = *(__uint128_t *)gw.u64[1];
> -
> -                       vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
> -                                  ((vwqe_hdr & 0xFFFF) << 48) |
> -                                  ((uint64_t)port << 32);
> -                       *(uint64_t *)gw.u64[1] = (uint64_t)vwqe_hdr;
> -                       cn10k_process_vwqe(gw.u64[1], port, flags, lookup_mem,
> -                                          ws->tstamp, ws->lmt_base);
> -                       /* Mark vector mempool object as get */
> -                       RTE_MEMPOOL_CHECK_COOKIES(
> -                               rte_mempool_from_obj((void *)gw.u64[1]),
> -                               (void **)&gw.u64[1], 1, 1);
> -               }
> -       }
> +       if (gw.u64[1])
> +               cn10k_sso_hws_post_process(ws, gw.u64, flags);
>
>         ev->event = gw.u64[0];
>         ev->u64 = gw.u64[1];
> @@ -308,13 +306,13 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
>
>  /* Used in cleaning up workslot. */
>  static __rte_always_inline uint16_t
> -cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
> +cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev,
> +                            const uint32_t flags)
>  {
>         union {
>                 __uint128_t get_work;
>                 uint64_t u64[2];
>         } gw;
> -       uint64_t mbuf;
>
>  #ifdef RTE_ARCH_ARM64
>         asm volatile(PLT_CPU_FEATURE_PREAMBLE
> @@ -325,9 +323,7 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
>                      "          ldp %[tag], %[wqp], [%[tag_loc]]        \n"
>                      "          tbnz %[tag], 63, rty%=                  \n"
>                      "done%=:   dmb ld                                  \n"
> -                    "          sub %[mbuf], %[wqp], #0x80              \n"
> -                    : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
> -                      [mbuf] "=&r"(mbuf)
> +                    : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
>                      : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
>                      : "memory");
>  #else
> @@ -335,24 +331,11 @@ cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
>                 roc_load_pair(gw.u64[0], gw.u64[1],
>                               ws->base + SSOW_LF_GWS_WQE0);
>         } while (gw.u64[0] & BIT_ULL(63));
> -       mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
>  #endif
>
> -       gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
> -                   (gw.u64[0] & (0x3FFull << 36)) << 4 |
> -                   (gw.u64[0] & 0xffffffff);
> -
> -       if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
> -               if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                   RTE_EVENT_TYPE_ETHDEV) {
> -                       uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
> -
> -                       gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
> -                       cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
> -                                         gw.u64[0] & 0xFFFFF, 0, NULL);
> -                       gw.u64[1] = mbuf;
> -               }
> -       }
> +       ws->gw_rdata = gw.u64[0];
> +       if (gw.u64[1])
> +               cn10k_sso_hws_post_process(ws, gw.u64, flags);
>
>         ev->event = gw.u64[0];
>         ev->u64 = gw.u64[1];
> @@ -471,7 +454,7 @@ NIX_RX_FASTPATH_MODES
>                                 ws->base + SSOW_LF_GWS_WQE0);                  \
>                         return 1;                                              \
>                 }                                                              \
> -               return cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);  \
> +               return cn10k_sso_hws_get_work(ws, ev, flags);                  \
>         }
>
>  #define SSO_DEQ_SEG(fn, flags)   SSO_DEQ(fn, flags | NIX_RX_MULTI_SEG_F)
> @@ -491,10 +474,9 @@ NIX_RX_FASTPATH_MODES
>                                 ws->base + SSOW_LF_GWS_WQE0);                  \
>                         return ret;                                            \
>                 }                                                              \
> -               ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);   \
> +               ret = cn10k_sso_hws_get_work(ws, ev, flags);                   \
>                 for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
> -                       ret = cn10k_sso_hws_get_work(ws, ev, flags,            \
> -                                                    ws->lookup_mem);          \
> +                       ret = cn10k_sso_hws_get_work(ws, ev, flags);           \
>                 return ret;                                                    \
>         }
>
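The cn10k side reads much better with this change: both cn10k_sso_hws_get_work()
and the _empty() variant now funnel into cn10k_sso_hws_post_process(), and the
dequeue templates no longer have to thread ws->lookup_mem through. For anyone
following along, below is a small standalone sketch of the GWS tag-word rewrite
that the new helpers perform (the expression is the one being moved by this
patch; the wrapper name, main() and the sample value are only mine for
illustration, not driver code):

/*
 * Sketch of the GWS tag-word rewrite done in the new post-processing
 * helpers: the hardware TT and group fields are shifted into the positions
 * rte_event expects (sched_type at bits [39:38], queue/group id from bit
 * 40) while the low 32 tag bits stay in place.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
sso_tag_to_event_word(uint64_t w0)
{
        return (w0 & (0x3ull << 32)) << 6 |   /* TT:  bits [33:32] -> [39:38] */
               (w0 & (0x3FFull << 36)) << 4 | /* grp: bits [45:36] -> [49:40] */
               (w0 & 0xffffffff);             /* flow tag stays in [31:0] */
}

int
main(void)
{
        uint64_t raw = (2ULL << 32) | (5ULL << 36) | 0xabcd; /* made-up value */

        printf("event word: 0x%" PRIx64 "\n", sso_tag_to_event_word(raw));
        return 0;
}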
> diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
> index 41bbe3cb22..987888d3db 100644
> --- a/drivers/event/cnxk/cn9k_eventdev.c
> +++ b/drivers/event/cnxk/cn9k_eventdev.c
> @@ -122,6 +122,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>                           cnxk_handle_event_t fn, void *arg)
>  {
>         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
> +       struct cnxk_timesync_info *tstamp;
>         struct cn9k_sso_hws_dual *dws;
>         struct cn9k_sso_hws *ws;
>         uint64_t cq_ds_cnt = 1;
> @@ -130,6 +131,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>         struct rte_event ev;
>         uintptr_t ws_base;
>         uint64_t val, req;
> +       void *lookup_mem;
>
>         plt_write64(0, base + SSO_LF_GGRP_QCTL);
>
> @@ -145,14 +147,19 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>         if (dev->dual_ws) {
>                 dws = hws;
>                 ws_base = dws->base[0];
> +               lookup_mem = dws->lookup_mem;
> +               tstamp = dws->tstamp;
>         } else {
>                 ws = hws;
>                 ws_base = ws->base;
> +               lookup_mem = ws->lookup_mem;
> +               tstamp = ws->tstamp;
>         }
>
>         while (aq_cnt || cq_ds_cnt || ds_cnt) {
>                 plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
> -               cn9k_sso_hws_get_work_empty(ws_base, &ev);
> +               cn9k_sso_hws_get_work_empty(ws_base, &ev, dev->rx_offloads,
> +                                           lookup_mem, tstamp);
>                 if (fn != NULL && ev.u64 != 0)
>                         fn(arg, ev);
>                 if (ev.sched_type != SSO_TT_EMPTY)
> diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
> index 88eb4e9cf9..64e97e321a 100644
> --- a/drivers/event/cnxk/cn9k_worker.h
> +++ b/drivers/event/cnxk/cn9k_worker.h
> @@ -169,6 +169,34 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
>                              mbuf_init | ((uint64_t)port_id) << 48, flags);
>  }
>
> +static __rte_always_inline void
> +cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
> +                         const void *const lookup_mem,
> +                         struct cnxk_timesync_info *tstamp)
> +{
> +       uint64_t tstamp_ptr;
> +
> +       u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
> +                (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
> +       if ((flags & CPT_RX_WQE_F) &&
> +           (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_CRYPTODEV)) {
> +               u64[1] = cn9k_cpt_crypto_adapter_dequeue(u64[1]);
> +       } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
> +               uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
> +
> +               u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
> +               cn9k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
> +                                lookup_mem);
> +               /* Extracting tstamp, if PTP enabled*/
> +               tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
> +                                          CNXK_SSO_WQE_SG_PTR);
> +               cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
> +                                       flags & NIX_RX_OFFLOAD_TSTAMP_F,
> +                                       (uint64_t *)tstamp_ptr);
> +               u64[1] = mbuf;
> +       }
> +}
> +
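With cn9k_sso_hws_post_process() factored out, the dual, single and
cleanup-time _empty fetch paths below all collapse to the same shape: read
the two GW words, derive the mbuf address (WQE - 0x80, matching the
"sub ..., #0x80" in the asm path) and run the shared post-processing only
when a WQE was actually returned. Roughly, using hypothetical stub names
rather than the driver types, just to show the structure:

#include <stdint.h>
#include <stdio.h>

struct port_state {                  /* stand-in for cn9k_sso_hws{,_dual} */
        void *lookup_mem;
        void *tstamp;
};

static void
post_process(uint64_t u64[2], uint64_t mbuf, uint32_t flags,
             void *lookup_mem, void *tstamp)
{
        /* the real helper rewrites the tag word and dispatches on the
         * event type (crypto adapter vs. ethdev) as in the hunk above */
        (void)u64; (void)mbuf; (void)flags; (void)lookup_mem; (void)tstamp;
}

static uint16_t
fetch_one(struct port_state *ps, uint64_t gw[2], uint32_t flags)
{
        uint64_t mbuf = gw[1] - 0x80;   /* placeholder for the mbuf offset */

        if (gw[1])      /* no WQE returned -> nothing to post-process */
                post_process(gw, mbuf, flags, ps->lookup_mem, ps->tstamp);
        return !!gw[1];
}

int
main(void)
{
        struct port_state ps = { 0 };
        uint64_t gw[2] = { 0, 0 };      /* empty get-work for the example */

        printf("got work: %u\n", (unsigned int)fetch_one(&ps, gw, 0));
        return 0;
}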
>  static __rte_always_inline uint16_t
>  cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
>                            struct rte_event *ev, const uint32_t flags,
> @@ -178,7 +206,6 @@ cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
>                 __uint128_t get_work;
>                 uint64_t u64[2];
>         } gw;
> -       uint64_t tstamp_ptr;
>         uint64_t mbuf;
>
>         if (flags & NIX_RX_OFFLOAD_PTYPE_F)
> @@ -207,34 +234,9 @@ cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
>         mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
>  #endif
>
> -       gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
> -                   (gw.u64[0] & (0x3FFull << 36)) << 4 |
> -                   (gw.u64[0] & 0xffffffff);
> -
> -       if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
> -               if ((flags & CPT_RX_WQE_F) &&
> -                   (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                    RTE_EVENT_TYPE_CRYPTODEV)) {
> -                       gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
> -               } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                          RTE_EVENT_TYPE_ETHDEV) {
> -                       uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
> -
> -                       gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
> -                       cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
> -                                        gw.u64[0] & 0xFFFFF, flags,
> -                                        dws->lookup_mem);
> -                       /* Extracting tstamp, if PTP enabled*/
> -                       tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
> -                                                           gw.u64[1]) +
> -                                                  CNXK_SSO_WQE_SG_PTR);
> -                       cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
> -                                               dws->tstamp,
> -                                               flags & NIX_RX_OFFLOAD_TSTAMP_F,
> -                                               (uint64_t *)tstamp_ptr);
> -                       gw.u64[1] = mbuf;
> -               }
> -       }
> +       if (gw.u64[1])
> +               cn9k_sso_hws_post_process(gw.u64, mbuf, flags, dws->lookup_mem,
> +                                         dws->tstamp);
>
>         ev->event = gw.u64[0];
>         ev->u64 = gw.u64[1];
> @@ -250,7 +252,6 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
>                 __uint128_t get_work;
>                 uint64_t u64[2];
>         } gw;
> -       uint64_t tstamp_ptr;
>         uint64_t mbuf;
>
>         plt_write64(ws->gw_wdata, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
> @@ -283,34 +284,9 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
>         mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
>  #endif
>
> -       gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
> -                   (gw.u64[0] & (0x3FFull << 36)) << 4 |
> -                   (gw.u64[0] & 0xffffffff);
> -
> -       if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
> -               if ((flags & CPT_RX_WQE_F) &&
> -                   (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                    RTE_EVENT_TYPE_CRYPTODEV)) {
> -                       gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
> -               } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                          RTE_EVENT_TYPE_ETHDEV) {
> -                       uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
> -
> -                       gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
> -                       cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
> -                                        gw.u64[0] & 0xFFFFF, flags,
> -                                        lookup_mem);
> -                       /* Extracting tstamp, if PTP enabled*/
> -                       tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
> -                                                           gw.u64[1]) +
> -                                                  CNXK_SSO_WQE_SG_PTR);
> -                       cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
> -                                               ws->tstamp,
> -                                               flags & NIX_RX_OFFLOAD_TSTAMP_F,
> -                                               (uint64_t *)tstamp_ptr);
> -                       gw.u64[1] = mbuf;
> -               }
> -       }
> +       if (gw.u64[1])
> +               cn9k_sso_hws_post_process(gw.u64, mbuf, flags, lookup_mem,
> +                                         ws->tstamp);
>
>         ev->event = gw.u64[0];
>         ev->u64 = gw.u64[1];
> @@ -320,7 +296,9 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
>
>  /* Used in cleaning up workslot. */
>  static __rte_always_inline uint16_t
> -cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
> +cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev,
> +                           const uint32_t flags, void *lookup_mem,
> +                           struct cnxk_timesync_info *tstamp)
>  {
>         union {
>                 __uint128_t get_work;
> @@ -353,21 +331,9 @@ cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
>         mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
>  #endif
>
> -       gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
> -                   (gw.u64[0] & (0x3FFull << 36)) << 4 |
> -                   (gw.u64[0] & 0xffffffff);
> -
> -       if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
> -               if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
> -                   RTE_EVENT_TYPE_ETHDEV) {
> -                       uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
> -
> -                       gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
> -                       cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
> -                                        gw.u64[0] & 0xFFFFF, 0, NULL);
> -                       gw.u64[1] = mbuf;
> -               }
> -       }
> +       if (gw.u64[1])
> +               cn9k_sso_hws_post_process(gw.u64, mbuf, flags, lookup_mem,
> +                                         tstamp);
>
>         ev->event = gw.u64[0];
>         ev->u64 = gw.u64[1];
> --
> 2.35.1
>

Thread overview: 5+ messages
2022-04-26 21:47 [PATCH 1/2] event/cnxk: add additional checks in OP_RELEASE Pavan Nikhilesh
2022-04-26 21:47 ` [PATCH 2/2] event/cnxk: move post-processing to separate function Pavan Nikhilesh
2022-05-16 15:21   ` Jerin Jacob
2022-04-27 10:58 ` [PATCH v2 1/2] event/cnxk: add additional checks in OP_RELEASE Pavan Nikhilesh
2022-04-27 10:58   ` [PATCH v2 2/2] event/cnxk: move post-processing to separate function Pavan Nikhilesh
