DPDK patches and discussions
* [PATCH v1 1/3] net/cnxk: rework no-fast-free offload handling
@ 2022-11-17  7:25 Ashwin Sekhar T K
  2022-11-17  7:25 ` [PATCH v1 2/3] net/cnxk: add sg2 descriptor support Ashwin Sekhar T K
  2022-11-17  7:25 ` [PATCH v1 3/3] net/cnxk: add debug check for number of Tx descriptors Ashwin Sekhar T K
  0 siblings, 2 replies; 4+ messages in thread
From: Ashwin Sekhar T K @ 2022-11-17  7:25 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, pbhagavatula, psatheesh, asekhar, anoobj, gakhil, hkalra

Add a separate routine to handle the no-fast-free offload
in the vector Tx path for multi-segment packets.
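
For reference, a minimal sketch of what the no-fast-free decision amounts
to (illustrative only: seg_must_not_be_freed_by_hw() and sg_word_with_noff()
are made-up names for this note; the driver itself uses
cnxk_nix_prefree_seg(), which additionally handles indirect and external
buffers):

#include <stdint.h>
#include <rte_mbuf.h>

/* Illustrative: decide whether NIX may return this segment to its pool.
 * A segment may only be freed by HW when it is a direct mbuf with a
 * reference count of 1; otherwise the invert-DF bit is set so that
 * software keeps ownership of the buffer.
 */
static inline uint64_t
seg_must_not_be_freed_by_hw(struct rte_mbuf *m)
{
	if (RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1)
		return 0; /* HW frees the buffer */
	return 1;         /* SW keeps the buffer */
}

/* Usage sketch: fold the decision into bit 55 of the SG word. */
static inline uint64_t
sg_word_with_noff(uint64_t sg_u, struct rte_mbuf *m)
{
	return sg_u | (seg_must_not_be_freed_by_hw(m) << 55);
}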

Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 124 +++++++++++++++++-------------------
 1 file changed, 59 insertions(+), 65 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 815cd2ff1f..a4c578354c 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -956,6 +956,14 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 	rte_io_wmb();
 #endif
 	m->next = NULL;
+
+	/* Quickly handle single segmented packets. With this if-condition
+	 * compiler will completely optimize out the below do-while loop
+	 * from the Tx handler when NIX_TX_MULTI_SEG_F offload is not set.
+	 */
+	if (!(flags & NIX_TX_MULTI_SEG_F))
+		goto done;
+
 	m = m_next;
 	if (!m)
 		goto done;
@@ -1360,6 +1368,30 @@ cn10k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
 	}
 }
 
+static __rte_always_inline uint16_t
+cn10k_nix_prepare_mseg_vec_noff(struct rte_mbuf *m, uint64_t *cmd,
+				uint64x2_t *cmd0, uint64x2_t *cmd1,
+				uint64x2_t *cmd2, uint64x2_t *cmd3,
+				const uint32_t flags)
+{
+	uint16_t segdw;
+
+	vst1q_u64(cmd, *cmd0); /* Send hdr */
+	if (flags & NIX_TX_NEED_EXT_HDR) {
+		vst1q_u64(cmd + 2, *cmd2); /* ext hdr */
+		vst1q_u64(cmd + 4, *cmd1); /* sg */
+	} else {
+		vst1q_u64(cmd + 2, *cmd1); /* sg */
+	}
+
+	segdw = cn10k_nix_prepare_mseg(m, cmd, flags);
+
+	if (flags & NIX_TX_OFFLOAD_TSTAMP_F)
+		vst1q_u64(cmd + segdw * 2 - 2, *cmd3);
+
+	return segdw;
+}
+
 static __rte_always_inline void
 cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 				union nix_send_hdr_w0_u *sh,
@@ -1389,17 +1421,6 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
-
-	/* Set invert df if buffer is not to be freed by H/W */
-	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-		sg_u |= (cnxk_nix_prefree_seg(m) << 55);
-		/* Mark mempool object as "put" since it is freed by NIX */
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-	if (!(sg_u & (1ULL << 55)))
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
-	rte_io_wmb();
-#endif
-
 	m->next = NULL;
 	m = m_next;
 	/* Fill mbuf segments */
@@ -1409,16 +1430,6 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 		len -= dlen;
 		sg_u = sg_u | ((uint64_t)dlen << (i << 4));
 		*slist = rte_mbuf_data_iova(m);
-		/* Set invert df if buffer is not to be freed by H/W */
-		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-			sg_u |= (cnxk_nix_prefree_seg(m) << (i + 55));
-			/* Mark mempool object as "put" since it is freed by NIX
-			 */
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-		if (!(sg_u & (1ULL << (i + 55))))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
-		rte_io_wmb();
-#endif
 		slist++;
 		i++;
 		nb_segs--;
@@ -1456,21 +1467,8 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
 	union nix_send_hdr_w0_u sh;
 	union nix_send_sg_s sg;
 
-	if (m->nb_segs == 1) {
-		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-			sg.u = vgetq_lane_u64(cmd1[0], 0);
-			sg.u |= (cnxk_nix_prefree_seg(m) << 55);
-			cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0);
-		}
-
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-		sg.u = vgetq_lane_u64(cmd1[0], 0);
-		if (!(sg.u & (1ULL << 55)))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
-		rte_io_wmb();
-#endif
+	if (m->nb_segs == 1)
 		return;
-	}
 
 	sh.u = vgetq_lane_u64(cmd0[0], 0);
 	sg.u = vgetq_lane_u64(cmd1[0], 0);
@@ -1491,16 +1489,32 @@ cn10k_nix_prep_lmt_mseg_vector(struct rte_mbuf **mbufs, uint64x2_t *cmd0,
 			       uint64_t *lmt_addr, __uint128_t *data128,
 			       uint8_t *shift, const uint16_t flags)
 {
-	uint8_t j, off, lmt_used;
+	uint8_t j, off, lmt_used = 0;
+
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+		off = 0;
+		for (j = 0; j < NIX_DESCS_PER_LOOP; j++) {
+			if (off + segdw[j] > 8) {
+				*data128 |= ((__uint128_t)off - 1) << *shift;
+				*shift += 3;
+				lmt_used++;
+				lmt_addr += 16;
+				off = 0;
+			}
+			off += cn10k_nix_prepare_mseg_vec_noff(mbufs[j],
+					lmt_addr + off * 2, &cmd0[j], &cmd1[j],
+					&cmd2[j], &cmd3[j], flags);
+		}
+		*data128 |= ((__uint128_t)off - 1) << *shift;
+		*shift += 3;
+		lmt_used++;
+		return lmt_used;
+	}
 
 	if (!(flags & NIX_TX_NEED_EXT_HDR) &&
 	    !(flags & NIX_TX_OFFLOAD_TSTAMP_F)) {
 		/* No segments in 4 consecutive packets. */
 		if ((segdw[0] + segdw[1] + segdw[2] + segdw[3]) <= 8) {
-			for (j = 0; j < NIX_DESCS_PER_LOOP; j++)
-				cn10k_nix_prepare_mseg_vec(mbufs[j], NULL,
-							   &cmd0[j], &cmd1[j],
-							   segdw[j], flags);
 			vst1q_u64(lmt_addr, cmd0[0]);
 			vst1q_u64(lmt_addr + 2, cmd1[0]);
 			vst1q_u64(lmt_addr + 4, cmd0[1]);
@@ -1517,18 +1531,10 @@ cn10k_nix_prep_lmt_mseg_vector(struct rte_mbuf **mbufs, uint64x2_t *cmd0,
 		}
 	}
 
-	lmt_used = 0;
 	for (j = 0; j < NIX_DESCS_PER_LOOP;) {
 		/* Fit consecutive packets in same LMTLINE. */
 		if ((segdw[j] + segdw[j + 1]) <= 8) {
 			if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
-				cn10k_nix_prepare_mseg_vec(mbufs[j], NULL,
-							   &cmd0[j], &cmd1[j],
-							   segdw[j], flags);
-				cn10k_nix_prepare_mseg_vec(mbufs[j + 1], NULL,
-							   &cmd0[j + 1],
-							   &cmd1[j + 1],
-							   segdw[j + 1], flags);
 				/* TSTAMP takes 4 each, no segs. */
 				vst1q_u64(lmt_addr, cmd0[j]);
 				vst1q_u64(lmt_addr + 2, cmd2[j]);
@@ -1643,23 +1649,11 @@ cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr,
 {
 	uint8_t off;
 
-	/* Handle no fast free when security is enabled without mseg */
-	if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
-	    (flags & NIX_TX_OFFLOAD_SECURITY_F) &&
-	    !(flags & NIX_TX_MULTI_SEG_F)) {
-		union nix_send_sg_s sg;
-
-		sg.u = vgetq_lane_u64(cmd1, 0);
-		sg.u |= (cnxk_nix_prefree_seg(mbuf) << 55);
-		cmd1 = vsetq_lane_u64(sg.u, cmd1, 0);
-
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-		sg.u = vgetq_lane_u64(cmd1, 0);
-		if (!(sg.u & (1ULL << 55)))
-			RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1,
-						0);
-		rte_io_wmb();
-#endif
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+		cn10k_nix_prepare_mseg_vec_noff(mbuf, LMT_OFF(laddr, 0, 0),
+						&cmd0, &cmd1, &cmd2, &cmd3,
+						flags);
+		return;
 	}
 	if (flags & NIX_TX_MULTI_SEG_F) {
 		if ((flags & NIX_TX_NEED_EXT_HDR) &&
-- 
2.25.1



* [PATCH v1 2/3] net/cnxk: add sg2 descriptor support
  2022-11-17  7:25 [PATCH v1 1/3] net/cnxk: rework no-fast-free offload handling Ashwin Sekhar T K
@ 2022-11-17  7:25 ` Ashwin Sekhar T K
  2022-11-17  7:25 ` [PATCH v1 3/3] net/cnxk: add debug check for number of Tx descriptors Ashwin Sekhar T K
  1 sibling, 0 replies; 4+ messages in thread
From: Ashwin Sekhar T K @ 2022-11-17  7:25 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, pbhagavatula, psatheesh, asekhar, anoobj, gakhil, hkalra

Add support for transmitting packets whose segments are allocated
from different mempools. This is enabled by using SG2 descriptors.
SG2 descriptors are used only when the segment is to be freed
by the HW.
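
Roughly, the per-segment choice introduced here looks like the sketch
below (illustrative: use_sg2_for_seg() is not a driver function;
roc_npa_aura_handle_to_aura() is the ROC helper used in the diff, and
aura0/prefree correspond to the send header aura and the
cnxk_nix_prefree_seg() result):

/* Sketch only; in the driver this lives inline in
 * cn10k_nix_prepare_mseg(). An SG2 sub-descriptor carries its own
 * aura, so it is needed exactly when HW will free the buffer
 * (prefree == 0) into a pool other than the one named in the send
 * header; everything else keeps using plain SG entries.
 */
static inline int
use_sg2_for_seg(struct rte_mbuf *m, uint16_t aura0, uint64_t prefree)
{
	uint16_t aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);

	return aura != aura0 && !prefree;
}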

Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 161 +++++++++++++++++++++++++++---------
 1 file changed, 123 insertions(+), 38 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index a4c578354c..3f08a8a473 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -54,6 +54,36 @@
 
 #define NIX_NB_SEGS_TO_SEGDW(x) ((NIX_SEGDW_MAGIC >> ((x) << 2)) & 0xF)
 
+static __plt_always_inline uint8_t
+cn10k_nix_mbuf_sg_dwords(struct rte_mbuf *m)
+{
+	uint32_t nb_segs = m->nb_segs;
+	uint16_t aura0, aura;
+	int segw, sg_segs;
+
+	aura0 = roc_npa_aura_handle_to_aura(m->pool->pool_id);
+
+	nb_segs--;
+	segw = 2;
+	sg_segs = 1;
+	while (nb_segs) {
+		m = m->next;
+		aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
+		if (aura != aura0) {
+			segw += 2 + (sg_segs == 2);
+			sg_segs = 0;
+		} else {
+			segw += (sg_segs == 0); /* SUBDC */
+			segw += 1;		/* IOVA */
+			sg_segs += 1;
+			sg_segs %= 3;
+		}
+		nb_segs--;
+	}
+
+	return (segw + 1) / 2;
+}
+
 static __plt_always_inline void
 cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
 {
@@ -915,15 +945,15 @@ cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr,
 static __rte_always_inline uint16_t
 cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 {
+	uint64_t prefree = 0, aura0, aura, nb_segs, segdw;
 	struct nix_send_hdr_s *send_hdr;
-	union nix_send_sg_s *sg;
+	union nix_send_sg_s *sg, l_sg;
+	union nix_send_sg2_s l_sg2;
 	struct rte_mbuf *m_next;
-	uint64_t *slist, sg_u;
+	uint8_t off, is_sg2;
 	uint64_t len, dlen;
 	uint64_t ol_flags;
-	uint64_t nb_segs;
-	uint64_t segdw;
-	uint8_t off, i;
+	uint64_t *slist;
 
 	send_hdr = (struct nix_send_hdr_s *)cmd;
 
@@ -938,20 +968,22 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 		ol_flags = m->ol_flags;
 
 	/* Start from second segment, first segment is already there */
-	i = 1;
-	sg_u = sg->u;
-	len -= sg_u & 0xFFFF;
+	is_sg2 = 0;
+	l_sg.u = sg->u;
+	len -= l_sg.u & 0xFFFF;
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
 	slist = &cmd[3 + off + 1];
 
 	/* Set invert df if buffer is not to be freed by H/W */
-	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-		sg_u |= (cnxk_nix_prefree_seg(m) << 55);
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+		prefree = cnxk_nix_prefree_seg(m);
+		l_sg.i1 = prefree;
+	}
 
-		/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-	if (!(sg_u & (1ULL << 55)))
+	/* Mark mempool object as "put" since it is freed by NIX */
+	if (!prefree)
 		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 	rte_io_wmb();
 #endif
@@ -964,55 +996,103 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 	if (!(flags & NIX_TX_MULTI_SEG_F))
 		goto done;
 
+	aura0 = send_hdr->w0.aura;
 	m = m_next;
 	if (!m)
 		goto done;
 
 	/* Fill mbuf segments */
 	do {
+		uint64_t iova;
+
+		/* Save the current mbuf properties. These can get cleared in
+		 * cnxk_nix_prefree_seg()
+		 */
 		m_next = m->next;
+		iova = rte_mbuf_data_iova(m);
 		dlen = m->data_len;
 		len -= dlen;
-		sg_u = sg_u | ((uint64_t)dlen << (i << 4));
-		*slist = rte_mbuf_data_iova(m);
-		/* Set invert df if buffer is not to be freed by H/W */
-		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-			sg_u |= (cnxk_nix_prefree_seg(m) << (i + 55));
-			/* Mark mempool object as "put" since it is freed by NIX
-			 */
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-		if (!(sg_u & (1ULL << (i + 55))))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
-#endif
-		slist++;
-		i++;
+
 		nb_segs--;
-		if (i > 2 && nb_segs) {
-			i = 0;
+		aura = aura0;
+		prefree = 0;
+
+		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+			aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
+			prefree = cnxk_nix_prefree_seg(m);
+			is_sg2 = aura != aura0 && !prefree;
+		}
+
+		if (unlikely(is_sg2)) {
+			/* This mbuf belongs to a different pool and
+			 * DF bit is not to be set, so use SG2 subdesc
+			 * so that it is freed to the appropriate pool.
+			 */
+
+			/* Write the previous descriptor out */
+			sg->u = l_sg.u;
+
+			/* If the current SG subdc does not have any
+			 * iovas in it, then the SG2 subdc can overwrite
+			 * that SG subdc.
+			 *
+			 * If the current SG subdc has 2 iovas in it, then
+			 * the current iova word should be left empty.
+			 */
+			slist += (-1 + (int)l_sg.segs);
+			sg = (union nix_send_sg_s *)slist;
+
+			l_sg2.u = l_sg.u & 0xC00000000000000; /* LD_TYPE */
+			l_sg2.subdc = NIX_SUBDC_SG2;
+			l_sg2.aura = aura;
+			l_sg2.seg1_size = dlen;
+			l_sg.u = l_sg2.u;
+
+			slist++;
+			*slist = iova;
+			slist++;
+		} else {
+			*slist = iova;
+			/* Set invert df if buffer is not to be freed by H/W */
+			l_sg.u |= (prefree << (l_sg.segs + 55));
+			/* Set the segment length */
+			l_sg.u |= ((uint64_t)dlen << (l_sg.segs << 4));
+			l_sg.segs += 1;
+			slist++;
+		}
+
+		if ((is_sg2 || l_sg.segs > 2) && nb_segs) {
+			sg->u = l_sg.u;
 			/* Next SG subdesc */
-			*(uint64_t *)slist = sg_u & 0xFC00000000000000;
-			sg->u = sg_u;
-			sg->segs = 3;
 			sg = (union nix_send_sg_s *)slist;
-			sg_u = sg->u;
+			l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+			l_sg.subdc = NIX_SUBDC_SG;
 			slist++;
 		}
 		m->next = NULL;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+		/* Mark mempool object as "put" since it is freed by NIX
+		 */
+		if (!prefree)
+			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+#endif
 		m = m_next;
 	} while (nb_segs);
 
 done:
 	/* Add remaining bytes of security data to last seg */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F && ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD && len) {
-		uint8_t shft = ((i - 1) << 4);
+		uint8_t shft = (l_sg.subdc == NIX_SUBDC_SG) ? ((l_sg.segs - 1) << 4) : 0;
 
-		dlen = ((sg_u >> shft) & 0xFFFFULL) + len;
-		sg_u = sg_u & ~(0xFFFFULL << shft);
-		sg_u |= dlen << shft;
+		dlen = ((l_sg.u >> shft) & 0xFFFFULL) + len;
+		l_sg.u = l_sg.u & ~(0xFFFFULL << shft);
+		l_sg.u |= dlen << shft;
 	}
 
-	sg->u = sg_u;
-	sg->segs = i;
+	/* Write the last subdc out */
+	sg->u = l_sg.u;
+
 	segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
 	/* Roundup extra dwords to multiple of 2 */
 	segdw = (segdw >> 1) + (segdw & 0x1);
@@ -1827,7 +1907,12 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 				struct rte_mbuf *m = tx_pkts[j];
 
 				/* Get dwords based on nb_segs. */
-				segdw[j] = NIX_NB_SEGS_TO_SEGDW(m->nb_segs);
+				if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F &&
+				      flags & NIX_TX_MULTI_SEG_F))
+					segdw[j] = NIX_NB_SEGS_TO_SEGDW(m->nb_segs);
+				else
+					segdw[j] = cn10k_nix_mbuf_sg_dwords(m);
+
 				/* Add dwords based on offloads. */
 				segdw[j] += 1 + /* SEND HDR */
 					    !!(flags & NIX_TX_NEED_EXT_HDR) +
-- 
2.25.1



* [PATCH v1 3/3] net/cnxk: add debug check for number of Tx descriptors
  2022-11-17  7:25 [PATCH v1 1/3] net/cnxk: rework no-fast-free offload handling Ashwin Sekhar T K
  2022-11-17  7:25 ` [PATCH v1 2/3] net/cnxk: add sg2 descriptor support Ashwin Sekhar T K
@ 2022-11-17  7:25 ` Ashwin Sekhar T K
  2023-01-06  8:58   ` Jerin Jacob
  1 sibling, 1 reply; 4+ messages in thread
From: Ashwin Sekhar T K @ 2022-11-17  7:25 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, pbhagavatula, psatheesh, asekhar, anoobj, gakhil, hkalra

When SG2 descriptors are used and more than 5 segments
are present, certain combinations of segments will require
more than 16 descriptors.

In debug builds, add an assert to capture this scenario.
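
As an illustration of how the count grows, the self-contained sketch
below (worst_case_segdw() is a made-up helper for this note; the real
estimate is cn10k_nix_mbuf_sg_dwords() added in patch 2/3) counts the
worst case where every segment after the first comes from a pool other
than the first segment's, so each one needs its own SG2 sub-descriptor
word plus an IOVA word instead of sharing an SG word with up to three
IOVAs:

#include <assert.h>
#include <stdint.h>

/* Worst-case descriptor size, in pairs of 64-bit words, for a chain
 * whose later segments all come from a different pool.
 */
static uint16_t
worst_case_segdw(uint16_t nb_segs, int has_ext_hdr, int has_tstamp)
{
	/* first segment: SG word + IOVA; every later segment: SG2 + IOVA */
	uint16_t segw = 2 + (nb_segs - 1) * 2;
	uint16_t segdw = (segw + 1) / 2;

	return segdw + 1 + !!has_ext_hdr + !!has_tstamp; /* + SEND HDR etc. */
}

int
main(void)
{
	/* Six such segments with an ext header and a timestamp already
	 * exceed the limit of 8 checked by the new PLT_ASSERT() below.
	 */
	assert(worst_case_segdw(6, 1, 1) > 8);
	return 0;
}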

Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 3f08a8a473..09c332b2b5 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -84,6 +84,22 @@ cn10k_nix_mbuf_sg_dwords(struct rte_mbuf *m)
 	return (segw + 1) / 2;
 }
 
+static __plt_always_inline void
+cn10k_nix_tx_mbuf_validate(struct rte_mbuf *m, const uint32_t flags)
+{
+#ifdef RTE_LIBRTE_MBUF_DEBUG
+	uint16_t segdw;
+
+	segdw = cn10k_nix_mbuf_sg_dwords(m);
+	segdw += 1 + !!(flags & NIX_TX_NEED_EXT_HDR) + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
+
+	PLT_ASSERT(segdw <= 8);
+#else
+	RTE_SET_USED(m);
+	RTE_SET_USED(flags);
+#endif
+}
+
 static __plt_always_inline void
 cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
 {
@@ -1307,6 +1323,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 	}
 
 	for (i = 0; i < burst; i++) {
+		cn10k_nix_tx_mbuf_validate(tx_pkts[i], flags);
+
 		/* Perform header writes for TSO, barrier at
 		 * lmt steorl will suffice.
 		 */
@@ -1906,6 +1924,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 			for (j = 0; j < NIX_DESCS_PER_LOOP; j++) {
 				struct rte_mbuf *m = tx_pkts[j];
 
+				cn10k_nix_tx_mbuf_validate(m, flags);
+
 				/* Get dwords based on nb_segs. */
 				if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F &&
 				      flags & NIX_TX_MULTI_SEG_F))
-- 
2.25.1



* Re: [PATCH v1 3/3] net/cnxk: add debug check for number of Tx descriptors
  2022-11-17  7:25 ` [PATCH v1 3/3] net/cnxk: add debug check for number of Tx descriptors Ashwin Sekhar T K
@ 2023-01-06  8:58   ` Jerin Jacob
  0 siblings, 0 replies; 4+ messages in thread
From: Jerin Jacob @ 2023-01-06  8:58 UTC (permalink / raw)
  To: Ashwin Sekhar T K
  Cc: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, jerinj, pbhagavatula, psatheesh, anoobj, gakhil,
	hkalra

On Thu, Nov 17, 2022 at 12:56 PM Ashwin Sekhar T K <asekhar@marvell.com> wrote:
>
> When SG2 descriptors are used and more than 5 segments
> are present, in certain combination of segments the
> number of descriptors required will be greater than
> 16.
>
> In debug builds, add an assert to capture this scenario.
>
> Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>


Series applied to dpdk-next-net-mrvl/for-next-net. Thanks


> ---
>  drivers/net/cnxk/cn10k_tx.h | 20 ++++++++++++++++++++
>  1 file changed, 20 insertions(+)
>
> diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
> index 3f08a8a473..09c332b2b5 100644
> --- a/drivers/net/cnxk/cn10k_tx.h
> +++ b/drivers/net/cnxk/cn10k_tx.h
> @@ -84,6 +84,22 @@ cn10k_nix_mbuf_sg_dwords(struct rte_mbuf *m)
>         return (segw + 1) / 2;
>  }
>
> +static __plt_always_inline void
> +cn10k_nix_tx_mbuf_validate(struct rte_mbuf *m, const uint32_t flags)
> +{
> +#ifdef RTE_LIBRTE_MBUF_DEBUG
> +       uint16_t segdw;
> +
> +       segdw = cn10k_nix_mbuf_sg_dwords(m);
> +       segdw += 1 + !!(flags & NIX_TX_NEED_EXT_HDR) + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
> +
> +       PLT_ASSERT(segdw <= 8);
> +#else
> +       RTE_SET_USED(m);
> +       RTE_SET_USED(flags);
> +#endif
> +}
> +
>  static __plt_always_inline void
>  cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
>  {
> @@ -1307,6 +1323,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
>         }
>
>         for (i = 0; i < burst; i++) {
> +               cn10k_nix_tx_mbuf_validate(tx_pkts[i], flags);
> +
>                 /* Perform header writes for TSO, barrier at
>                  * lmt steorl will suffice.
>                  */
> @@ -1906,6 +1924,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
>                         for (j = 0; j < NIX_DESCS_PER_LOOP; j++) {
>                                 struct rte_mbuf *m = tx_pkts[j];
>
> +                               cn10k_nix_tx_mbuf_validate(m, flags);
> +
>                                 /* Get dwords based on nb_segs. */
>                                 if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F &&
>                                       flags & NIX_TX_MULTI_SEG_F))
> --
> 2.25.1
>

