patches for DPDK stable branches
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: rasland@nvidia.com, matan@nvidia.com, orika@nvidia.com,
	thomas@monjalon.net, akozyrev@nvidia.com, stable@dpdk.org
Subject: [dpdk-stable] [PATCH v2 1/2] net/mlx5: optimize inline mbuf freeing
Date: Wed,  6 Jan 2021 08:34:22 +0000
Message-ID: <1609922063-13716-2-git-send-email-viacheslavo@nvidia.com>
In-Reply-To: <1609922063-13716-1-git-send-email-viacheslavo@nvidia.com>

The mlx5 PMD supports packet data inlining by pushing the data
into the transmit descriptor. If a packet is short enough and all
of its data are inlined, the mbuf is no longer needed to send the
data and can be freed.

The mbuf was previously freed in the innermost loop that builds
the transmit descriptors. This patch postpones the mbuf freeing to
the tx_burst routine exit, optimizing the loop and allowing multiple
mbufs to be freed in bulk with a single pool API call.

Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
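Below is a minimal sketch of the deferred bulk-free idea, for
illustration only: it is not the mlx5_tx_free_mbuf() implementation
from this patch and assumes single-segment, directly attached mbufs
with reference count 1; the helper name example_bulk_free() is
hypothetical. Runs of mbufs sharing the same mempool are returned
with one rte_mempool_put_bulk() call instead of calling
rte_pktmbuf_free_seg() per packet inside the descriptor-building loop.

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Free a burst of already-sent mbufs, grouping them per mempool. */
    static void
    example_bulk_free(struct rte_mbuf **pkts, unsigned int n)
    {
    	unsigned int i = 0;

    	while (i < n) {
    		struct rte_mempool *mp = pkts[i]->pool;
    		unsigned int j = i + 1;

    		/* Extend the run while mbufs share the same mempool. */
    		while (j < n && pkts[j]->pool == mp)
    			j++;
    		/* Return the whole run with a single pool API call. */
    		rte_mempool_put_bulk(mp, (void *)&pkts[i], j - i);
    		i = j;
    	}
    }

In the actual patch the inlined mbufs stay in the original pkts array,
are counted in loc->mbuf_free, and are handed back to the pool by the
non-inlined __mlx5_tx_free_mbuf() wrapper on tx_burst exit.
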
 drivers/net/mlx5/mlx5_rxtx.c | 38 ++++++++++++++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_rxtx.h |  1 +
 2 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 65a1f99..ee56a72 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1990,6 +1990,17 @@ enum mlx5_txcmp_code {
 		}
 	}
 }
+/*
+ * Not inlined version of the mbuf freeing routine, intended to be
+ * called only once, on tx_burst completion.
+ */
+static __rte_noinline void
+__mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+		    unsigned int pkts_n,
+		    unsigned int olx __rte_unused)
+{
+	mlx5_tx_free_mbuf(pkts, pkts_n, olx);
+}
 
 /**
  * Free the mbuf from the elts ring buffer till new tail.
@@ -4408,10 +4419,25 @@ enum mlx5_txcmp_code {
 			MLX5_ASSERT(room >= tlen);
 			room -= tlen;
 			/*
-			 * Packet data are completely inlined,
-			 * free the packet immediately.
+			 * Packet data are completely inlined;
+			 * we can try to free the packet.
+			 */
+			if (likely(loc->pkts_sent == loc->mbuf_free)) {
+				/*
+				 * All packets from the beginning of the burst
+				 * are inlined, so the mbufs can be freed
+				 * directly from the original array on tx_burst exit.
+				 */
+				loc->mbuf_free++;
+				goto next_mbuf;
+			}
+			/*
+			 * In order not to call rte_pktmbuf_free_seg() here,
+			 * in the innermost loop (which might be very
+			 * expensive), we just save the mbuf in elts.
 			 */
-			rte_pktmbuf_free_seg(loc->mbuf);
+			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+			loc->elts_free--;
 			goto next_mbuf;
 pointer_empw:
 			/*
@@ -4433,6 +4459,7 @@ enum mlx5_txcmp_code {
 			mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
 			/* We have to store mbuf in elts.*/
 			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+			loc->elts_free--;
 			room -= MLX5_WQE_DSEG_SIZE;
 			/* Ring buffer wraparound is checked at the loop end.*/
 			++dseg;
@@ -4442,7 +4469,6 @@ enum mlx5_txcmp_code {
 			slen += dlen;
 #endif
 			loc->pkts_sent++;
-			loc->elts_free--;
 			pkts_n--;
 			if (unlikely(!pkts_n || !loc->elts_free)) {
 				/*
@@ -4892,6 +4918,8 @@ enum mlx5_txcmp_code {
 	MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
 	if (unlikely(!pkts_n))
 		return 0;
+	if (MLX5_TXOFF_CONFIG(INLINE))
+		loc.mbuf_free = 0;
 	loc.pkts_sent = 0;
 	loc.pkts_copy = 0;
 	loc.wqe_last = NULL;
@@ -5155,6 +5183,8 @@ enum mlx5_txcmp_code {
 	/* Increment sent packets counter. */
 	txq->stats.opackets += loc.pkts_sent;
 #endif
+	if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
+		__mlx5_tx_free_mbuf(pkts, loc.mbuf_free, olx);
 	return loc.pkts_sent;
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1e9345a..af47839 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -217,6 +217,7 @@ struct mlx5_txq_local {
 	uint16_t wqe_free; /* available wqe remain. */
 	uint16_t mbuf_off; /* data offset in current mbuf. */
 	uint16_t mbuf_nseg; /* number of remaining mbuf. */
+	uint16_t mbuf_free; /* number of inline mbufs to free. */
 };
 
 /* TX queue descriptor. */
-- 
1.8.3.1


Thread overview: 7+ messages
     [not found] <1608311697-31529-1-git-send-email-viacheslavo@nvidia.com>
2020-12-18 17:14 ` [dpdk-stable] [PATCH " Viacheslav Ovsiienko
     [not found] ` <1609922063-13716-1-git-send-email-viacheslavo@nvidia.com>
2021-01-06  8:34   ` Viacheslav Ovsiienko [this message]
     [not found] ` <1611335529-26503-1-git-send-email-viacheslavo@nvidia.com>
2021-01-22 17:12   ` [dpdk-stable] [PATCH v3 " Viacheslav Ovsiienko
2021-01-27 12:44     ` Ferruh Yigit
2021-01-27 12:48       ` [dpdk-stable] [dpdk-dev] " Ferruh Yigit
2021-01-28  9:14       ` [dpdk-stable] " Slava Ovsiienko
2021-01-28  9:34         ` Thomas Monjalon
