patches for DPDK stable branches
 help / color / mirror / Atom feed
* [PATCH 23.11] net/octeon_ep: fix mbuf data offset update
@ 2025-12-30  7:23 Vamsi Krishna
  0 siblings, 0 replies; only message in thread
From: Vamsi Krishna @ 2025-12-30  7:23 UTC (permalink / raw)
  To: stable; +Cc: shperetz, Vamsi Attunuru

From: Vamsi Attunuru <vattunuru@marvell.com>

[ upstream commit 74348d7ad2503960611e979677acad4b4641e7dd ]

The buffer refill routine uses the mempool API instead of pktmbuf
alloc to avoid an mbuf reset. This patch uses rearm data to update
the mbuf fields, and also removes a redundant refill count update.

Fixes: 35dee56cee00 ("net/octeon_ep: add new fastpath routines")

Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
 drivers/net/octeon_ep/cnxk_ep_rx.c    |  9 ++++-----
 drivers/net/octeon_ep/otx_ep_common.h |  3 +++
 drivers/net/octeon_ep/otx_ep_rxtx.c   | 27 +++++++++++++++++++++++++++
 3 files changed, 34 insertions(+), 5 deletions(-)

diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.c b/drivers/net/octeon_ep/cnxk_ep_rx.c
index 41b369bd6b..10ab0ceaaa 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.c
@@ -6,6 +6,8 @@
 #include "otx2_ep_vf.h"
 #include "otx_ep_rxtx.h"
 
+#define cnxk_pktmbuf_mtod(m, t) ((t)(void *)((char *)(m)->buf_addr + RTE_PKTMBUF_HEADROOM))
+
 static inline int
 cnxk_ep_rx_refill_mbuf(struct otx_ep_droq *droq, uint32_t count)
 {
@@ -163,7 +165,6 @@ cnxk_ep_process_pkts_scalar_mseg(struct rte_mbuf **rx_pkts, struct otx_ep_droq *
 {
 	struct rte_mbuf **recv_buf_list = droq->recv_buf_list;
 	uint32_t total_pkt_len, bytes_rsvd = 0;
-	uint16_t port_id = droq->otx_ep_dev->port_id;
 	uint16_t nb_desc = droq->nb_desc;
 	uint16_t pkts;
 
@@ -175,7 +176,7 @@ cnxk_ep_process_pkts_scalar_mseg(struct rte_mbuf **rx_pkts, struct otx_ep_droq *
 		uint32_t pkt_len = 0;
 
 		mbuf = recv_buf_list[droq->read_idx];
-		info = rte_pktmbuf_mtod(mbuf, struct otx_ep_droq_info *);
+		info = cnxk_pktmbuf_mtod(mbuf, struct otx_ep_droq_info *);
 
 		total_pkt_len = rte_bswap16(info->length >> 48) + OTX_EP_INFO_SIZE;
 
@@ -190,7 +191,7 @@ cnxk_ep_process_pkts_scalar_mseg(struct rte_mbuf **rx_pkts, struct otx_ep_droq *
 			if (!pkt_len) {
 				/* Note the first seg */
 				first_buf = mbuf;
-				mbuf->data_off += OTX_EP_INFO_SIZE;
+				*(uint64_t *)&mbuf->rearm_data = droq->rearm_data;
 				mbuf->pkt_len = cpy_len - OTX_EP_INFO_SIZE;
 				mbuf->data_len = cpy_len - OTX_EP_INFO_SIZE;
 			} else {
@@ -210,12 +211,10 @@ cnxk_ep_process_pkts_scalar_mseg(struct rte_mbuf **rx_pkts, struct otx_ep_droq *
 			droq->refill_count++;
 		}
 		mbuf = first_buf;
-		mbuf->port = port_id;
 		rx_pkts[pkts] = mbuf;
 		bytes_rsvd += pkt_len;
 	}
 
-	droq->refill_count += new_pkts;
 	droq->pkts_pending -= pkts;
 	/* Stats */
 	droq->stats.pkts_received += pkts;
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index ccd4483058..2b93cabc13 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -365,6 +365,9 @@ struct otx_ep_droq {
 	/* receive buffer list contains mbuf ptr list */
 	struct rte_mbuf **recv_buf_list;
 
+	/* Packet re-arm data. */
+	uint64_t rearm_data;
+
 	/* Packets pending to be processed */
 	uint64_t pkts_pending;
 
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 65a1f304e8..429579ba16 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -284,6 +284,32 @@ otx_ep_droq_setup_ring_buffers(struct otx_ep_droq *droq)
 	return 0;
 }
 
+static inline uint64_t
+otx_ep_set_rearm_data(struct otx_ep_device *otx_ep)
+{
+	uint16_t port_id = otx_ep->port_id;
+	struct rte_mbuf mb_def;
+	uint64_t *tmp;
+
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) - offsetof(struct rte_mbuf, data_off) !=
+			 2);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) - offsetof(struct rte_mbuf, data_off) !=
+			 4);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) - offsetof(struct rte_mbuf, data_off) !=
+			 6);
+	mb_def.nb_segs = 1;
+	mb_def.data_off = RTE_PKTMBUF_HEADROOM + OTX_EP_INFO_SIZE;
+	mb_def.port = port_id;
+	rte_mbuf_refcnt_set(&mb_def, 1);
+
+	/* Prevent compiler reordering: rearm_data covers previous fields */
+	rte_compiler_barrier();
+	tmp = (uint64_t *)&mb_def.rearm_data;
+
+	return *tmp;
+}
+
 /* OQ initialization */
 static int
 otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
@@ -340,6 +366,7 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
 		goto init_droq_fail;
 
 	droq->refill_threshold = c_refill_threshold;
+	droq->rearm_data = otx_ep_set_rearm_data(otx_ep);
 
 	/* Set up OQ registers */
 	ret = otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
-- 
2.34.1


^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2025-12-30  7:23 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-12-30  7:23 [PATCH 23.11] net/octeon_ep: fix mbuf data offset update Vamsi Krishna

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).