DPDK patches and discussions
* [dpdk-dev] [PATCH] net/octeontx2: fix handling indirect mbufs during TX
@ 2019-07-25  5:43 Sunil Kumar Kori
  2019-07-25  8:20 ` [dpdk-dev] [PATCH v2 1/1] net/octeontx2: fix handling indirect mbufs during Tx Sunil Kumar Kori
  0 siblings, 1 reply; 4+ messages in thread
From: Sunil Kumar Kori @ 2019-07-25  5:43 UTC (permalink / raw)
  To: Jerin Jacob, Nithin Dabilpuram, Kiran Kumar K; +Cc: dev, Sunil Kumar Kori

A multi-segment packet may also be built from indirect mbufs.
Currently the driver leaks buffers for indirect mbufs, as they are
never freed back to the packet pool.

This patch fixes the handling of indirect mbufs for the following
use cases (the mixed case is sketched below):
- the packet contains only indirect mbufs.
- the packet contains a mix of direct and indirect mbufs.
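
For context, the mixed case can be produced by attaching an indirect
mbuf to a direct one. A minimal sketch, assuming a pool 'mp' (not part
of the patch; the helper name is hypothetical):

#include <rte_mbuf.h>

/* Hypothetical helper: build a 2-segment Tx packet whose second
 * segment is an indirect mbuf, the mixed case this patch fixes.
 */
static struct rte_mbuf *
make_mixed_chain(struct rte_mempool *mp)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *payload = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *mi = rte_pktmbuf_alloc(mp);

	if (head == NULL || payload == NULL || mi == NULL)
		return NULL;

	/* mi now shares payload's data buffer and bumps payload's
	 * refcount; freeing mi must detach and drop that refcount,
	 * which the Tx path did not do before this fix.
	 */
	rte_pktmbuf_attach(mi, payload);

	/* Caller still owns 'payload' and frees it separately. */
	head->next = mi;
	head->nb_segs = 2;
	head->pkt_len = head->data_len + mi->data_len;
	return head;
}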

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/net/octeontx2/otx2_tx.c |  8 ++--
 drivers/net/octeontx2/otx2_tx.h | 82 +++++++++++++++++++++++++++++++++++++----
 2 files changed, 79 insertions(+), 11 deletions(-)

diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index 6bce551..0dcadff 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -178,7 +178,7 @@
 			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
 				offsetof(struct rte_mbuf, buf_iova));
 
-			if (rte_pktmbuf_prefree_seg(mbuf) == NULL)
+			if (otx2_nix_prefree_seg(mbuf))
 				vsetq_lane_u64(0x80000, xmask01, 0);
 			else
 				__mempool_check_cookies(mbuf->pool,
@@ -187,7 +187,7 @@
 
 			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
 				offsetof(struct rte_mbuf, buf_iova));
-			if (rte_pktmbuf_prefree_seg(mbuf) == NULL)
+			if (otx2_nix_prefree_seg(mbuf))
 				vsetq_lane_u64(0x80000, xmask01, 1);
 			else
 				__mempool_check_cookies(mbuf->pool,
@@ -196,7 +196,7 @@
 
 			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
 				offsetof(struct rte_mbuf, buf_iova));
-			if (rte_pktmbuf_prefree_seg(mbuf) == NULL)
+			if (otx2_nix_prefree_seg(mbuf))
 				vsetq_lane_u64(0x80000, xmask23, 0);
 			else
 				__mempool_check_cookies(mbuf->pool,
@@ -205,7 +205,7 @@
 
 			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
 				offsetof(struct rte_mbuf, buf_iova));
-			if (rte_pktmbuf_prefree_seg(mbuf) == NULL)
+			if (otx2_nix_prefree_seg(mbuf))
 				vsetq_lane_u64(0x80000, xmask23, 1);
 			else
 				__mempool_check_cookies(mbuf->pool,
diff --git a/drivers/net/octeontx2/otx2_tx.h b/drivers/net/octeontx2/otx2_tx.h
index b75a220..87e747f 100644
--- a/drivers/net/octeontx2/otx2_tx.h
+++ b/drivers/net/octeontx2/otx2_tx.h
@@ -58,6 +58,72 @@
 	}
 }
 
+static __rte_always_inline uint64_t
+otx2_pktmbuf_detach(struct rte_mbuf *m)
+{
+	struct rte_mempool *mp = m->pool;
+	uint32_t mbuf_size, buf_len;
+	struct rte_mbuf *md;
+	uint16_t priv_size;
+	uint16_t refcount;
+
+	/* Update refcount of direct mbuf */
+	md = rte_mbuf_from_indirect(m);
+	refcount = rte_mbuf_refcnt_update(md, -1);
+
+	priv_size = rte_pktmbuf_priv_size(mp);
+	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
+	buf_len = rte_pktmbuf_data_room_size(mp);
+
+	m->priv_size = priv_size;
+	m->buf_addr = (char *)m + mbuf_size;
+	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+	m->buf_len = (uint16_t)buf_len;
+	rte_pktmbuf_reset_headroom(m);
+	m->data_len = 0;
+	m->ol_flags = 0;
+	m->next = NULL;
+	m->nb_segs = 1;
+
+	/* Now indirect mbuf is safe to free */
+	rte_pktmbuf_free(m);
+
+	if (refcount == 0) {
+		rte_mbuf_refcnt_set(md, 1);
+		md->data_len = 0;
+		md->ol_flags = 0;
+		md->next = NULL;
+		md->nb_segs = 1;
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+static __rte_always_inline uint64_t
+otx2_nix_prefree_seg(struct rte_mbuf *m)
+{
+	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+		if (!RTE_MBUF_DIRECT(m))
+			return otx2_pktmbuf_detach(m);
+
+		m->next = NULL;
+		m->nb_segs = 1;
+		return 0;
+	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
+		if (!RTE_MBUF_DIRECT(m))
+			return otx2_pktmbuf_detach(m);
+
+		rte_mbuf_refcnt_set(m, 1);
+		m->next = NULL;
+		m->nb_segs = 1;
+		return 0;
+	}
+
+	/* Mbuf has a refcount greater than 1, so it must not be freed */
+	return 1;
+}
+
 static inline void
 otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 {
@@ -189,9 +255,11 @@
 		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);
 
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-			/* Set don't free bit if reference count > 1 */
-			if (rte_pktmbuf_prefree_seg(m) == NULL)
-				send_hdr->w0.df = 1; /* SET DF */
+			/* DF bit = 1 if the refcount of the current mbuf
+			 * or its parent mbuf is greater than 1;
+			 * DF bit = 0 otherwise.
+			 */
+			send_hdr->w0.df = otx2_nix_prefree_seg(m);
 		}
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
@@ -233,6 +301,8 @@
 		off = 0;
 
 	sg = (union nix_send_sg_s *)&cmd[2 + off];
+	/* Clear sg->u header before use */
+	sg->u &= 0xFC00000000000000;
 	sg_u = sg->u;
 	slist = &cmd[3 + off];
 
@@ -244,11 +314,9 @@
 		m_next = m->next;
 		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
 		*slist = rte_mbuf_data_iova(m);
-		/* Set invert df if reference count > 1 */
+		/* Set invert df if buffer is not to be freed by H/W */
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-			sg_u |=
-			((uint64_t)(rte_pktmbuf_prefree_seg(m) == NULL) <<
-			 (i + 55));
+			sg_u |=	(otx2_nix_prefree_seg(m) << (i + 55));
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!(sg_u & (1ULL << (i + 55)))) {
 			m->next = NULL;
-- 
1.8.3.1
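
As a sanity check on the new logic, a host-side sketch of the expected
DF values (illustrative only; it calls the driver-internal
otx2_nix_prefree_seg() directly and assumes a pool 'mp'):

	struct rte_mbuf *md = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *mi = rte_pktmbuf_alloc(mp);

	rte_pktmbuf_attach(mi, md);	/* md refcnt: 1 -> 2 */
	rte_pktmbuf_free(md);		/* app drops its ref: 2 -> 1 */

	/* mi now holds the last reference: prefree detaches mi, returns
	 * it to its pool, resets md, and reports DF = 0 so the NIX
	 * hardware is allowed to free the buffer.
	 */
	assert(otx2_nix_prefree_seg(mi) == 0);

	/* Had md still been referenced elsewhere, prefree would have
	 * reported DF = 1 and left md alone.
	 */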


Thread overview: 4+ messages
2019-07-25  5:43 [dpdk-dev] [PATCH] net/octeontx2: fix handling indirect mbufs during TX Sunil Kumar Kori
2019-07-25  8:20 ` [dpdk-dev] [PATCH v2 1/1] net/octeontx2: fix handling indirect mbufs during Tx Sunil Kumar Kori
2019-07-27 13:47   ` [dpdk-dev] [EXT] " Nithin Kumar Dabilpuram
2019-07-28 16:08     ` Jerin Jacob Kollanukkaran
