DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 0/2] net/mlx5: add mbuf fast free Tx offload
@ 2020-12-18 17:14 Viacheslav Ovsiienko
  2020-12-18 17:14 ` [dpdk-dev] [PATCH 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
                   ` (3 more replies)
  0 siblings, 4 replies; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2020-12-18 17:14 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas

This patch adds support of the mbuf fast free offload to the
transmit datapath. This offload allows to free the mbufs on
transmit completion in the most efficient way. It requires
the all mbufs were allocated from the same pool, have
the reference counter value as 1, and have no any externally
attached buffers.

The patchset is split in two parts, the first one is overall
send loop optimization and can be ported back to stable release,
and also is the preparation step before introducing the fast
free offload. The second part provides the code for the feature.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

Viacheslav Ovsiienko (2):
  net/mlx5: optimize inline mbuf freeing
  net/mlx5: add mbuf fast free offload support

 drivers/net/mlx5/mlx5_rxtx.c | 67 ++++++++++++++++++++++++++++++++++++++------
 drivers/net/mlx5/mlx5_rxtx.h |  2 ++
 drivers/net/mlx5/mlx5_txq.c  |  6 ++++
 3 files changed, 66 insertions(+), 9 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-dev] [PATCH 1/2] net/mlx5: optimize inline mbuf freeing
  2020-12-18 17:14 [dpdk-dev] [PATCH 0/2] net/mlx5: add mbuf fast free Tx offload Viacheslav Ovsiienko
@ 2020-12-18 17:14 ` Viacheslav Ovsiienko
  2020-12-18 17:14 ` [dpdk-dev] [PATCH 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2020-12-18 17:14 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas, stable

The mlx5 PMD supports packet data inlining by pushing data
to the transmit descriptor. If packet is short enough and all
data are inline, the mbuf is not needed for data send anymore
and can be freed.

The mbuf free was performed in the most inner loop building
the transmit descriptors. This patch postpones the mbuf free
transaction to the tx_burst routine exit, optimizing the loop
and allowing the bulk freeing for the multiple mbufs in single
pool API call.

Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 38 ++++++++++++++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_rxtx.h |  1 +
 2 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index d12d746..e8c8783 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1940,6 +1940,17 @@ enum mlx5_txcmp_code {
 		}
 	}
 }
+/*
+ * No inline version to free buffers for optimal call
+ * on the tx_burst completion.
+ */
+static __rte_noinline void
+__mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+		    unsigned int pkts_n,
+		    unsigned int olx __rte_unused)
+{
+	mlx5_tx_free_mbuf(pkts, pkts_n, olx);
+}
 
 /**
  * Free the mbuf from the elts ring buffer till new tail.
@@ -4392,10 +4403,25 @@ enum mlx5_txcmp_code {
 			MLX5_ASSERT(room >= tlen);
 			room -= tlen;
 			/*
-			 * Packet data are completely inlined,
-			 * free the packet immediately.
+			 * Packet data are completely inline,
+			 * we can try to free the packet.
+			 */
+			if (likely(loc->pkts_sent == loc->mbuf_free)) {
+				/*
+				 * All the packets from the burst beginning
+				 * are inline, we can free mbufs directly
+				 * from the origin array on tx_burst exit().
+				 */
+				loc->mbuf_free++;
+				goto next_mbuf;
+			}
+			/*
+			 * In order not to call rte_pktmbuf_free_seg() here,
+			 * in the most inner loop (that might be very
+			 * expensive) we just save the mbuf in elts.
 			 */
-			rte_pktmbuf_free_seg(loc->mbuf);
+			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+			loc->elts_free--;
 			goto next_mbuf;
 pointer_empw:
 			/*
@@ -4417,6 +4443,7 @@ enum mlx5_txcmp_code {
 			mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
 			/* We have to store mbuf in elts.*/
 			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+			loc->elts_free--;
 			room -= MLX5_WQE_DSEG_SIZE;
 			/* Ring buffer wraparound is checked at the loop end.*/
 			++dseg;
@@ -4426,7 +4453,6 @@ enum mlx5_txcmp_code {
 			slen += dlen;
 #endif
 			loc->pkts_sent++;
-			loc->elts_free--;
 			pkts_n--;
 			if (unlikely(!pkts_n || !loc->elts_free)) {
 				/*
@@ -4880,6 +4906,8 @@ enum mlx5_txcmp_code {
 	MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
 	if (unlikely(!pkts_n))
 		return 0;
+	if (MLX5_TXOFF_CONFIG(INLINE))
+		loc.mbuf_free = 0;
 	loc.pkts_sent = 0;
 	loc.pkts_copy = 0;
 	loc.wqe_last = NULL;
@@ -5143,6 +5171,8 @@ enum mlx5_txcmp_code {
 	/* Increment sent packets counter. */
 	txq->stats.opackets += loc.pkts_sent;
 #endif
+	if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
+		__mlx5_tx_free_mbuf(pkts, loc.mbuf_free, olx);
 	return loc.pkts_sent;
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 7989a50..fc5cc2e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -217,6 +217,7 @@ struct mlx5_txq_local {
 	uint16_t wqe_free; /* available wqe remain. */
 	uint16_t mbuf_off; /* data offset in current mbuf. */
 	uint16_t mbuf_nseg; /* number of remaining mbuf. */
+	uint16_t mbuf_free; /* number of inline mbufs to free. */
 };
 
 /* TX queue descriptor. */
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-dev] [PATCH 2/2] net/mlx5: add mbuf fast free offload support
  2020-12-18 17:14 [dpdk-dev] [PATCH 0/2] net/mlx5: add mbuf fast free Tx offload Viacheslav Ovsiienko
  2020-12-18 17:14 ` [dpdk-dev] [PATCH 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
@ 2020-12-18 17:14 ` Viacheslav Ovsiienko
  2020-12-18 17:59   ` Thomas Monjalon
  2021-01-06  8:34 ` [dpdk-dev] [PATCH v2 0/2] " Viacheslav Ovsiienko
  2021-01-22 17:12 ` [dpdk-dev] [PATCH v3 0/2] " Viacheslav Ovsiienko
  3 siblings, 1 reply; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2020-12-18 17:14 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas

This patch adds support of the mbuf fast free offload to the
transmit datapath. This offload allows to free the mbufs on
transmit completion in the most efficient way. It requires
the all mbufs were allocated from the same pool, have
the reference counter value as 1, and have no any externally
attached buffers.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 35 +++++++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_rxtx.h |  1 +
 drivers/net/mlx5/mlx5_txq.c  |  6 ++++++
 3 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index e8c8783..9545283 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1839,6 +1839,8 @@ enum mlx5_txcmp_code {
 /**
  * Free the mbufs from the linear array of pointers.
  *
+ * @param txq
+ *   Pointer to Tx queue structure.
  * @param pkts
  *   Pointer to array of packets to be free.
  * @param pkts_n
@@ -1848,7 +1850,8 @@ enum mlx5_txcmp_code {
  *   compile time and may be used for optimization.
  */
 static __rte_always_inline void
-mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+		  struct rte_mbuf **__rte_restrict pkts,
 		  unsigned int pkts_n,
 		  unsigned int olx __rte_unused)
 {
@@ -1864,6 +1867,16 @@ enum mlx5_txcmp_code {
 	 */
 	MLX5_ASSERT(pkts);
 	MLX5_ASSERT(pkts_n);
+	/*
+	 * Free mbufs directly to the pool in bulk
+	 * if fast free offload is engaged
+	 */
+	if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
+		mbuf = *pkts;
+		pool = mbuf->pool;
+		rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
+		return;
+	}
 	for (;;) {
 		for (;;) {
 			/*
@@ -1945,11 +1958,12 @@ enum mlx5_txcmp_code {
  * on the tx_burst completion.
  */
 static __rte_noinline void
-__mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+__mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+		    struct rte_mbuf **__rte_restrict pkts,
 		    unsigned int pkts_n,
 		    unsigned int olx __rte_unused)
 {
-	mlx5_tx_free_mbuf(pkts, pkts_n, olx);
+	mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
 }
 
 /**
@@ -1983,7 +1997,8 @@ enum mlx5_txcmp_code {
 		part = RTE_MIN(part, n_elts);
 		MLX5_ASSERT(part);
 		MLX5_ASSERT(part <= txq->elts_s);
-		mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
+		mlx5_tx_free_mbuf(txq,
+				  &txq->elts[txq->elts_tail & txq->elts_m],
 				  part, olx);
 		txq->elts_tail += part;
 		n_elts -= part;
@@ -5172,7 +5187,7 @@ enum mlx5_txcmp_code {
 	txq->stats.opackets += loc.pkts_sent;
 #endif
 	if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
-		__mlx5_tx_free_mbuf(pkts, loc.mbuf_free, olx);
+		__mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
 	return loc.pkts_sent;
 }
 
@@ -5827,17 +5842,19 @@ enum mlx5_txcmp_code {
 
 int
 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
-		       uint16_t tx_queue_id __rte_unused,
+		       uint16_t tx_queue_id,
 		       struct rte_eth_burst_mode *mode)
 {
 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
 	unsigned int i, olx;
 
 	for (i = 0; i < RTE_DIM(txoff_func); i++) {
 		if (pkt_burst == txoff_func[i].func) {
 			olx = txoff_func[i].olx;
 			snprintf(mode->info, sizeof(mode->info),
-				 "%s%s%s%s%s%s%s%s%s",
+				 "%s%s%s%s%s%s%s%s%s%s",
 				 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
 				 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
 				 "Legacy MPW" : "Enhanced MPW") : "No MPW",
@@ -5856,7 +5873,9 @@ enum mlx5_txcmp_code {
 				 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
 				 " + METADATA" : "",
 				 (olx & MLX5_TXOFF_CONFIG_TXPP) ?
-				 " + TXPP" : "");
+				 " + TXPP" : "",
+				 (txq && txq->fast_free) ?
+				 " + Fast Free" : "");
 			return 0;
 		}
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index fc5cc2e..1b2983d 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -251,6 +251,7 @@ struct mlx5_txq_data {
 	uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
 	uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
 	uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
+	uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
 	uint16_t inlen_send; /* Ordinary send data inline size. */
 	uint16_t inlen_empw; /* eMPW max packet size to inline. */
 	uint16_t inlen_mode; /* Minimal data length to inline. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index d96abef..c51c85b 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -123,6 +123,8 @@
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
 	}
+	if (!config->mprq.enabled)
+		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -800,6 +802,10 @@
 	bool vlan_inline;
 	unsigned int temp;
 
+	txq_ctrl->txq.fast_free =
+		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
 #if defined(RTE_ARCH_ARM64)
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [PATCH 2/2] net/mlx5: add mbuf fast free offload support
  2020-12-18 17:14 ` [dpdk-dev] [PATCH 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
@ 2020-12-18 17:59   ` Thomas Monjalon
  0 siblings, 0 replies; 16+ messages in thread
From: Thomas Monjalon @ 2020-12-18 17:59 UTC (permalink / raw)
  To: Viacheslav Ovsiienko; +Cc: dev, rasland, matan, orika

18/12/2020 18:14, Viacheslav Ovsiienko:
> This patch adds support of the mbuf fast free offload to the
> transmit datapath. This offload allows to free the mbufs on
> transmit completion in the most efficient way. It requires
> the all mbufs were allocated from the same pool, have
> the reference counter value as 1, and have no any externally
> attached buffers.
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
>  drivers/net/mlx5/mlx5_rxtx.c | 35 +++++++++++++++++++++++++++--------
>  drivers/net/mlx5/mlx5_rxtx.h |  1 +
>  drivers/net/mlx5/mlx5_txq.c  |  6 ++++++

The doc update is missing:
	- release notes
	- mlx5 guide
	- mlx5 features file



^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-dev] [PATCH v2 0/2] add mbuf fast free offload support
  2020-12-18 17:14 [dpdk-dev] [PATCH 0/2] net/mlx5: add mbuf fast free Tx offload Viacheslav Ovsiienko
  2020-12-18 17:14 ` [dpdk-dev] [PATCH 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
  2020-12-18 17:14 ` [dpdk-dev] [PATCH 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
@ 2021-01-06  8:34 ` Viacheslav Ovsiienko
  2021-01-06  8:34   ` [dpdk-dev] [PATCH v2 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
  2021-01-06  8:34   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
  2021-01-22 17:12 ` [dpdk-dev] [PATCH v3 0/2] " Viacheslav Ovsiienko
  3 siblings, 2 replies; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2021-01-06  8:34 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas, akozyrev

This patch adds support of the mbuf fast free offload to the
transmit datapath. This offload allows to free the mbufs on
transmit completion in the most efficient way. It requires
the all mbufs were allocated from the same pool, have
the reference counter value as 1, and have no any externally
attached buffers.

The patchset is split in two parts, the first one is overall
send loop optimization and can be ported back to stable release,
and also is the preparation step before introducing the fast
free offload. The second part provides the code for the feature.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

---
v1: http://patches.dpdk.org/patch/85482/
v2: release notes, PMD features and documentation update

Viacheslav Ovsiienko (2):
  net/mlx5: optimize inline mbuf freeing
  net/mlx5: add mbuf fast free offload support

 doc/guides/nics/features/mlx5.ini      |  1 +
 doc/guides/nics/mlx5.rst               | 12 ++++++
 doc/guides/rel_notes/release_21_02.rst |  7 +++-
 drivers/net/mlx5/mlx5_rxtx.c           | 67 +++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h           |  2 +
 drivers/net/mlx5/mlx5_txq.c            |  6 +++
 6 files changed, 85 insertions(+), 10 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-dev] [PATCH v2 1/2] net/mlx5: optimize inline mbuf freeing
  2021-01-06  8:34 ` [dpdk-dev] [PATCH v2 0/2] " Viacheslav Ovsiienko
@ 2021-01-06  8:34   ` Viacheslav Ovsiienko
  2021-01-06  8:34   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
  1 sibling, 0 replies; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2021-01-06  8:34 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas, akozyrev, stable

The mlx5 PMD supports packet data inlining by pushing data
to the transmit descriptor. If packet is short enough and all
data are inline, the mbuf is not needed for data send anymore
and can be freed.

The mbuf free was performed in the most inner loop building
the transmit descriptors. This patch postpones the mbuf free
transaction to the tx_burst routine exit, optimizing the loop
and allowing the bulk freeing for the multiple mbufs in single
pool API call.

Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 38 ++++++++++++++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_rxtx.h |  1 +
 2 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 65a1f99..ee56a72 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1990,6 +1990,17 @@ enum mlx5_txcmp_code {
 		}
 	}
 }
+/*
+ * No inline version to free buffers for optimal call
+ * on the tx_burst completion.
+ */
+static __rte_noinline void
+__mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+		    unsigned int pkts_n,
+		    unsigned int olx __rte_unused)
+{
+	mlx5_tx_free_mbuf(pkts, pkts_n, olx);
+}
 
 /**
  * Free the mbuf from the elts ring buffer till new tail.
@@ -4408,10 +4419,25 @@ enum mlx5_txcmp_code {
 			MLX5_ASSERT(room >= tlen);
 			room -= tlen;
 			/*
-			 * Packet data are completely inlined,
-			 * free the packet immediately.
+			 * Packet data are completely inline,
+			 * we can try to free the packet.
+			 */
+			if (likely(loc->pkts_sent == loc->mbuf_free)) {
+				/*
+				 * All the packets from the burst beginning
+				 * are inline, we can free mbufs directly
+				 * from the origin array on tx_burst exit().
+				 */
+				loc->mbuf_free++;
+				goto next_mbuf;
+			}
+			/*
+			 * In order not to call rte_pktmbuf_free_seg() here,
+			 * in the most inner loop (that might be very
+			 * expensive) we just save the mbuf in elts.
 			 */
-			rte_pktmbuf_free_seg(loc->mbuf);
+			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+			loc->elts_free--;
 			goto next_mbuf;
 pointer_empw:
 			/*
@@ -4433,6 +4459,7 @@ enum mlx5_txcmp_code {
 			mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
 			/* We have to store mbuf in elts.*/
 			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+			loc->elts_free--;
 			room -= MLX5_WQE_DSEG_SIZE;
 			/* Ring buffer wraparound is checked at the loop end.*/
 			++dseg;
@@ -4442,7 +4469,6 @@ enum mlx5_txcmp_code {
 			slen += dlen;
 #endif
 			loc->pkts_sent++;
-			loc->elts_free--;
 			pkts_n--;
 			if (unlikely(!pkts_n || !loc->elts_free)) {
 				/*
@@ -4892,6 +4918,8 @@ enum mlx5_txcmp_code {
 	MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
 	if (unlikely(!pkts_n))
 		return 0;
+	if (MLX5_TXOFF_CONFIG(INLINE))
+		loc.mbuf_free = 0;
 	loc.pkts_sent = 0;
 	loc.pkts_copy = 0;
 	loc.wqe_last = NULL;
@@ -5155,6 +5183,8 @@ enum mlx5_txcmp_code {
 	/* Increment sent packets counter. */
 	txq->stats.opackets += loc.pkts_sent;
 #endif
+	if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
+		__mlx5_tx_free_mbuf(pkts, loc.mbuf_free, olx);
 	return loc.pkts_sent;
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1e9345a..af47839 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -217,6 +217,7 @@ struct mlx5_txq_local {
 	uint16_t wqe_free; /* available wqe remain. */
 	uint16_t mbuf_off; /* data offset in current mbuf. */
 	uint16_t mbuf_nseg; /* number of remaining mbuf. */
+	uint16_t mbuf_free; /* number of inline mbufs to free. */
 };
 
 /* TX queue descriptor. */
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-dev] [PATCH v2 2/2] net/mlx5: add mbuf fast free offload support
  2021-01-06  8:34 ` [dpdk-dev] [PATCH v2 0/2] " Viacheslav Ovsiienko
  2021-01-06  8:34   ` [dpdk-dev] [PATCH v2 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
@ 2021-01-06  8:34   ` Viacheslav Ovsiienko
  1 sibling, 0 replies; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2021-01-06  8:34 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas, akozyrev

This patch adds support of the mbuf fast free offload to the
transmit datapath. This offload allows to free the mbufs on
transmit completion in the most efficient way. It requires
the all mbufs were allocated from the same pool, have
the reference counter value as 1, and have no any externally
attached buffers.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 doc/guides/nics/features/mlx5.ini      |  1 +
 doc/guides/nics/mlx5.rst               | 12 ++++++++++++
 doc/guides/rel_notes/release_21_02.rst |  7 ++++++-
 drivers/net/mlx5/mlx5_rxtx.c           | 35 ++++++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_rxtx.h           |  1 +
 drivers/net/mlx5/mlx5_txq.c            |  6 ++++++
 6 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index 54ec95d..72348cd 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -9,6 +9,7 @@ Link status          = Y
 Link status event    = Y
 Removal event        = Y
 Rx interrupt         = Y
+Fast mbuf free       = Y
 Queue start/stop     = Y
 Burst mode info      = Y
 MTU update           = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 3bda0f8..dee4404 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -74,6 +74,7 @@ Features
 - RX VLAN stripping.
 - TX VLAN insertion.
 - RX CRC stripping configuration.
+- TX mbuf fast free offload.
 - Promiscuous mode on PF and VF.
 - Multicast promiscuous mode on PF and VF.
 - Hardware checksum offloads.
@@ -320,6 +321,17 @@ Limitations
     for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
     The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
 
+- TX mbuf fast free:
+
+  - fast free offload assumes the all mbufs being sent are originated from the
+    same memory pool and there is no any extra references to the mbufs (the
+    reference counter for each mbuf is equal 1 on tx_burst call). The latter
+    means there should be no any externally attached buffers in mbufs. It is
+    an application responsibility to provide the correct mbufs if the fast
+    free offload is engaged. The mlx5 PMD implicitly produces the mbufs with
+    externally attached buffers if MPRQ option is enabled, hence, the fast
+    free offload is neither supported nor advertised if there is MPRQ enabled.
+
 - Sample flow:
 
   - Supports ``RTE_FLOW_ACTION_TYPE_SAMPLE`` action only within NIC Rx and E-Switch steering domain.
diff --git a/doc/guides/rel_notes/release_21_02.rst b/doc/guides/rel_notes/release_21_02.rst
index 638f981..9fa0634 100644
--- a/doc/guides/rel_notes/release_21_02.rst
+++ b/doc/guides/rel_notes/release_21_02.rst
@@ -27,7 +27,7 @@ New Features
 .. This section should contain new features added in this release.
    Sample format:
 
-   * **Add a title in the past tense with a full stop.**
+   * **Added support for the Tx mbuf fast free offload.**
 
      Add a short 1-2 sentence description in the past tense.
      The description should be enough to allow someone scanning
@@ -55,6 +55,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Mellanox mlx5 driver.**
+
+  Updated the Mellanox mlx5 driver with new features and improvements, including:
+
+  * Added vectorized Multi-Packet Rx Queue burst.
 
 Removed Items
 -------------
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index ee56a72..680904e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1889,6 +1889,8 @@ enum mlx5_txcmp_code {
 /**
  * Free the mbufs from the linear array of pointers.
  *
+ * @param txq
+ *   Pointer to Tx queue structure.
  * @param pkts
  *   Pointer to array of packets to be free.
  * @param pkts_n
@@ -1898,7 +1900,8 @@ enum mlx5_txcmp_code {
  *   compile time and may be used for optimization.
  */
 static __rte_always_inline void
-mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+		  struct rte_mbuf **__rte_restrict pkts,
 		  unsigned int pkts_n,
 		  unsigned int olx __rte_unused)
 {
@@ -1914,6 +1917,16 @@ enum mlx5_txcmp_code {
 	 */
 	MLX5_ASSERT(pkts);
 	MLX5_ASSERT(pkts_n);
+	/*
+	 * Free mbufs directly to the pool in bulk
+	 * if fast free offload is engaged
+	 */
+	if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
+		mbuf = *pkts;
+		pool = mbuf->pool;
+		rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
+		return;
+	}
 	for (;;) {
 		for (;;) {
 			/*
@@ -1995,11 +2008,12 @@ enum mlx5_txcmp_code {
  * on the tx_burst completion.
  */
 static __rte_noinline void
-__mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+__mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+		    struct rte_mbuf **__rte_restrict pkts,
 		    unsigned int pkts_n,
 		    unsigned int olx __rte_unused)
 {
-	mlx5_tx_free_mbuf(pkts, pkts_n, olx);
+	mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
 }
 
 /**
@@ -2033,7 +2047,8 @@ enum mlx5_txcmp_code {
 		part = RTE_MIN(part, n_elts);
 		MLX5_ASSERT(part);
 		MLX5_ASSERT(part <= txq->elts_s);
-		mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
+		mlx5_tx_free_mbuf(txq,
+				  &txq->elts[txq->elts_tail & txq->elts_m],
 				  part, olx);
 		txq->elts_tail += part;
 		n_elts -= part;
@@ -5184,7 +5199,7 @@ enum mlx5_txcmp_code {
 	txq->stats.opackets += loc.pkts_sent;
 #endif
 	if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
-		__mlx5_tx_free_mbuf(pkts, loc.mbuf_free, olx);
+		__mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
 	return loc.pkts_sent;
 }
 
@@ -5829,17 +5844,19 @@ enum mlx5_txcmp_code {
 
 int
 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
-		       uint16_t tx_queue_id __rte_unused,
+		       uint16_t tx_queue_id,
 		       struct rte_eth_burst_mode *mode)
 {
 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
 	unsigned int i, olx;
 
 	for (i = 0; i < RTE_DIM(txoff_func); i++) {
 		if (pkt_burst == txoff_func[i].func) {
 			olx = txoff_func[i].olx;
 			snprintf(mode->info, sizeof(mode->info),
-				 "%s%s%s%s%s%s%s%s%s",
+				 "%s%s%s%s%s%s%s%s%s%s",
 				 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
 				 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
 				 "Legacy MPW" : "Enhanced MPW") : "No MPW",
@@ -5858,7 +5875,9 @@ enum mlx5_txcmp_code {
 				 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
 				 " + METADATA" : "",
 				 (olx & MLX5_TXOFF_CONFIG_TXPP) ?
-				 " + TXPP" : "");
+				 " + TXPP" : "",
+				 (txq && txq->fast_free) ?
+				 " + Fast Free" : "");
 			return 0;
 		}
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index af47839..b78ca60 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -251,6 +251,7 @@ struct mlx5_txq_data {
 	uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
 	uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
 	uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
+	uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
 	uint16_t inlen_send; /* Ordinary send data inline size. */
 	uint16_t inlen_empw; /* eMPW max packet size to inline. */
 	uint16_t inlen_mode; /* Minimal data length to inline. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index d96abef..c51c85b 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -123,6 +123,8 @@
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
 	}
+	if (!config->mprq.enabled)
+		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -800,6 +802,10 @@
 	bool vlan_inline;
 	unsigned int temp;
 
+	txq_ctrl->txq.fast_free =
+		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
 #if defined(RTE_ARCH_ARM64)
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-dev] [PATCH v3 0/2] add mbuf fast free offload support
  2020-12-18 17:14 [dpdk-dev] [PATCH 0/2] net/mlx5: add mbuf fast free Tx offload Viacheslav Ovsiienko
                   ` (2 preceding siblings ...)
  2021-01-06  8:34 ` [dpdk-dev] [PATCH v2 0/2] " Viacheslav Ovsiienko
@ 2021-01-22 17:12 ` Viacheslav Ovsiienko
  2021-01-22 17:12   ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
                     ` (3 more replies)
  3 siblings, 4 replies; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2021-01-22 17:12 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas, akozyrev

This patch adds support of the mbuf fast free offload to the
transmit datapath. This offload allows to free the mbufs on
transmit completion in the most efficient way. It requires
the all mbufs were allocated from the same pool, have
the reference counter value as 1, and have no any externally
attached buffers.

The patchset is split in two parts, the first one is overall
send loop optimization and can be ported back to stable release,
and also is the preparation step before introducing the fast
free offload. The second part provides the code for the feature.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

---
v1: http://patches.dpdk.org/patch/85482/
v2: release notes, PMD features and documentation update
v3: typo fixes, documentation update

Viacheslav Ovsiienko (2):
  net/mlx5: optimize inline mbuf freeing
  net/mlx5: add mbuf fast free offload support

 doc/guides/nics/features/mlx5.ini      |  1 +
 doc/guides/nics/mlx5.rst               | 12 ++++++
 doc/guides/rel_notes/release_21_02.rst |  2 +-
 drivers/net/mlx5/mlx5_rxtx.c           | 67 +++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h           |  2 +
 drivers/net/mlx5/mlx5_txq.c            |  6 +++
 6 files changed, 80 insertions(+), 10 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-dev] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing
  2021-01-22 17:12 ` [dpdk-dev] [PATCH v3 0/2] " Viacheslav Ovsiienko
@ 2021-01-22 17:12   ` Viacheslav Ovsiienko
  2021-01-27 12:44     ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit
  2021-01-22 17:12   ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2021-01-22 17:12 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas, akozyrev, stable

The mlx5 PMD supports packet data inlining by pushing data
to the transmit descriptor. If packet is short enough and all
data are inline, the mbuf is not needed for data send anymore
and can be freed.

The mbuf free was performed in the most inner loop building
the transmit descriptors. This patch postpones the mbuf free
transaction to the tx_burst routine exit, optimizing the loop
and allowing the bulk freeing for the multiple mbufs in single
pool API call.

Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 38 ++++++++++++++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_rxtx.h |  1 +
 2 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 3497765..97912dd 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1990,6 +1990,17 @@ enum mlx5_txcmp_code {
 		}
 	}
 }
+/*
+ * No inline version to free buffers for optimal call
+ * on the tx_burst completion.
+ */
+static __rte_noinline void
+__mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+		    unsigned int pkts_n,
+		    unsigned int olx __rte_unused)
+{
+	mlx5_tx_free_mbuf(pkts, pkts_n, olx);
+}
 
 /**
  * Free the mbuf from the elts ring buffer till new tail.
@@ -4408,10 +4419,25 @@ enum mlx5_txcmp_code {
 			MLX5_ASSERT(room >= tlen);
 			room -= tlen;
 			/*
-			 * Packet data are completely inlined,
-			 * free the packet immediately.
+			 * Packet data are completely inline,
+			 * we can try to free the packet.
+			 */
+			if (likely(loc->pkts_sent == loc->mbuf_free)) {
+				/*
+				 * All the packets from the burst beginning
+				 * are inline, we can free mbufs directly
+				 * from the origin array on tx_burst exit().
+				 */
+				loc->mbuf_free++;
+				goto next_mbuf;
+			}
+			/*
+			 * In order not to call rte_pktmbuf_free_seg() here,
+			 * in the most inner loop (that might be very
+			 * expensive) we just save the mbuf in elts.
 			 */
-			rte_pktmbuf_free_seg(loc->mbuf);
+			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+			loc->elts_free--;
 			goto next_mbuf;
 pointer_empw:
 			/*
@@ -4433,6 +4459,7 @@ enum mlx5_txcmp_code {
 			mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
 			/* We have to store mbuf in elts.*/
 			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+			loc->elts_free--;
 			room -= MLX5_WQE_DSEG_SIZE;
 			/* Ring buffer wraparound is checked at the loop end.*/
 			++dseg;
@@ -4442,7 +4469,6 @@ enum mlx5_txcmp_code {
 			slen += dlen;
 #endif
 			loc->pkts_sent++;
-			loc->elts_free--;
 			pkts_n--;
 			if (unlikely(!pkts_n || !loc->elts_free)) {
 				/*
@@ -4892,6 +4918,8 @@ enum mlx5_txcmp_code {
 	MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
 	if (unlikely(!pkts_n))
 		return 0;
+	if (MLX5_TXOFF_CONFIG(INLINE))
+		loc.mbuf_free = 0;
 	loc.pkts_sent = 0;
 	loc.pkts_copy = 0;
 	loc.wqe_last = NULL;
@@ -5155,6 +5183,8 @@ enum mlx5_txcmp_code {
 	/* Increment sent packets counter. */
 	txq->stats.opackets += loc.pkts_sent;
 #endif
+	if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
+		__mlx5_tx_free_mbuf(pkts, loc.mbuf_free, olx);
 	return loc.pkts_sent;
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 7756ed3..9dac408 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -209,6 +209,7 @@ struct mlx5_txq_local {
 	uint16_t wqe_free; /* available wqe remain. */
 	uint16_t mbuf_off; /* data offset in current mbuf. */
 	uint16_t mbuf_nseg; /* number of remaining mbuf. */
+	uint16_t mbuf_free; /* number of inline mbufs to free. */
 };
 
 /* TX queue descriptor. */
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-dev] [PATCH v3 2/2] net/mlx5: add mbuf fast free offload support
  2021-01-22 17:12 ` [dpdk-dev] [PATCH v3 0/2] " Viacheslav Ovsiienko
  2021-01-22 17:12   ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
@ 2021-01-22 17:12   ` Viacheslav Ovsiienko
  2021-01-26 10:34   ` [dpdk-dev] [PATCH v3 0/2] " Matan Azrad
  2021-01-27  9:31   ` Raslan Darawsheh
  3 siblings, 0 replies; 16+ messages in thread
From: Viacheslav Ovsiienko @ 2021-01-22 17:12 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, orika, thomas, akozyrev

This patch adds support of the mbuf fast free offload to the
transmit datapath. This offload allows freeing the mbufs on
transmit completion in the most efficient way. It requires
that all mbufs be allocated from the same pool, have
a reference counter value of 1, and have no externally
attached buffers.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 doc/guides/nics/features/mlx5.ini      |  1 +
 doc/guides/nics/mlx5.rst               | 12 ++++++++++++
 doc/guides/rel_notes/release_21_02.rst |  2 +-
 drivers/net/mlx5/mlx5_rxtx.c           | 35 ++++++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_rxtx.h           |  1 +
 drivers/net/mlx5/mlx5_txq.c            |  6 ++++++
 6 files changed, 48 insertions(+), 9 deletions(-)

diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index 9d14678..a997543 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -9,6 +9,7 @@ Link status          = Y
 Link status event    = Y
 Removal event        = Y
 Rx interrupt         = Y
+Fast mbuf free       = Y
 Queue start/stop     = Y
 Burst mode info      = Y
 MTU update           = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 1765952..e5f8f3f 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -74,6 +74,7 @@ Features
 - RX VLAN stripping.
 - TX VLAN insertion.
 - RX CRC stripping configuration.
+- TX mbuf fast free offload.
 - Promiscuous mode on PF and VF.
 - Multicast promiscuous mode on PF and VF.
 - Hardware checksum offloads.
@@ -353,6 +354,17 @@ Limitations
     for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
     The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
 
+- TX mbuf fast free:
+
+  - fast free offload assumes that all mbufs being sent originate from the
+    same memory pool and there are no extra references to the mbufs (the
+    reference counter for each mbuf equals 1 on tx_burst call). The latter
+    means there should be no externally attached buffers in mbufs. It is
+    the application's responsibility to provide correct mbufs if the fast
+    free offload is engaged. The mlx5 PMD implicitly produces mbufs with
+    externally attached buffers if the MPRQ option is enabled, hence the fast
+    free offload is neither supported nor advertised if MPRQ is enabled.
+
 - Sample flow:
 
   - Supports ``RTE_FLOW_ACTION_TYPE_SAMPLE`` action only within NIC Rx and E-Switch steering domain.
diff --git a/doc/guides/rel_notes/release_21_02.rst b/doc/guides/rel_notes/release_21_02.rst
index f52468c..0410337 100644
--- a/doc/guides/rel_notes/release_21_02.rst
+++ b/doc/guides/rel_notes/release_21_02.rst
@@ -98,6 +98,7 @@ New Features
     mirroring action on E-Switch.
   * Enlarge the number of flow priorities to 21844(0 - 21843) for ingress or egress
     flow groups greater than 0 and for any transfer flow group.
+  * Add support for the Tx mbuf fast free offload.
 
 * **Updated Wangxun txgbe driver.**
 
@@ -127,7 +128,6 @@ New Features
   * Added support for aes-cbc sha256-128-hmac cipher combination in OCTEON TX2
     crypto PMD lookaside protocol offload for IPsec.
 
-
 Removed Items
 -------------
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 97912dd..2e4b87c 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1889,6 +1889,8 @@ enum mlx5_txcmp_code {
 /**
  * Free the mbufs from the linear array of pointers.
  *
+ * @param txq
+ *   Pointer to Tx queue structure.
  * @param pkts
  *   Pointer to array of packets to be free.
  * @param pkts_n
@@ -1898,7 +1900,8 @@ enum mlx5_txcmp_code {
  *   compile time and may be used for optimization.
  */
 static __rte_always_inline void
-mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+		  struct rte_mbuf **__rte_restrict pkts,
 		  unsigned int pkts_n,
 		  unsigned int olx __rte_unused)
 {
@@ -1914,6 +1917,16 @@ enum mlx5_txcmp_code {
 	 */
 	MLX5_ASSERT(pkts);
 	MLX5_ASSERT(pkts_n);
+	/*
+	 * Free mbufs directly to the pool in bulk
+	 * if fast free offload is engaged
+	 */
+	if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
+		mbuf = *pkts;
+		pool = mbuf->pool;
+		rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
+		return;
+	}
 	for (;;) {
 		for (;;) {
 			/*
@@ -1995,11 +2008,12 @@ enum mlx5_txcmp_code {
  * on the tx_burst completion.
  */
 static __rte_noinline void
-__mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+__mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
+		    struct rte_mbuf **__rte_restrict pkts,
 		    unsigned int pkts_n,
 		    unsigned int olx __rte_unused)
 {
-	mlx5_tx_free_mbuf(pkts, pkts_n, olx);
+	mlx5_tx_free_mbuf(txq, pkts, pkts_n, olx);
 }
 
 /**
@@ -2033,7 +2047,8 @@ enum mlx5_txcmp_code {
 		part = RTE_MIN(part, n_elts);
 		MLX5_ASSERT(part);
 		MLX5_ASSERT(part <= txq->elts_s);
-		mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
+		mlx5_tx_free_mbuf(txq,
+				  &txq->elts[txq->elts_tail & txq->elts_m],
 				  part, olx);
 		txq->elts_tail += part;
 		n_elts -= part;
@@ -5184,7 +5199,7 @@ enum mlx5_txcmp_code {
 	txq->stats.opackets += loc.pkts_sent;
 #endif
 	if (MLX5_TXOFF_CONFIG(INLINE) && loc.mbuf_free)
-		__mlx5_tx_free_mbuf(pkts, loc.mbuf_free, olx);
+		__mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx);
 	return loc.pkts_sent;
 }
 
@@ -5829,17 +5844,19 @@ enum mlx5_txcmp_code {
 
 int
 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
-		       uint16_t tx_queue_id __rte_unused,
+		       uint16_t tx_queue_id,
 		       struct rte_eth_burst_mode *mode)
 {
 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
 	unsigned int i, olx;
 
 	for (i = 0; i < RTE_DIM(txoff_func); i++) {
 		if (pkt_burst == txoff_func[i].func) {
 			olx = txoff_func[i].olx;
 			snprintf(mode->info, sizeof(mode->info),
-				 "%s%s%s%s%s%s%s%s%s",
+				 "%s%s%s%s%s%s%s%s%s%s",
 				 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
 				 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
 				 "Legacy MPW" : "Enhanced MPW") : "No MPW",
@@ -5858,7 +5875,9 @@ enum mlx5_txcmp_code {
 				 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
 				 " + METADATA" : "",
 				 (olx & MLX5_TXOFF_CONFIG_TXPP) ?
-				 " + TXPP" : "");
+				 " + TXPP" : "",
+				 (txq && txq->fast_free) ?
+				 " + Fast Free" : "");
 			return 0;
 		}
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 9dac408..6432030 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -243,6 +243,7 @@ struct mlx5_txq_data {
 	uint16_t vlan_en:1; /* VLAN insertion in WQE is supported. */
 	uint16_t db_nc:1; /* Doorbell mapped to non-cached region. */
 	uint16_t db_heu:1; /* Doorbell heuristic write barrier. */
+	uint16_t fast_free:1; /* mbuf fast free on Tx is enabled. */
 	uint16_t inlen_send; /* Ordinary send data inline size. */
 	uint16_t inlen_empw; /* eMPW max packet size to inline. */
 	uint16_t inlen_mode; /* Minimal data length to inline. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index b81bb4a..bf5e9a9 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -123,6 +123,8 @@
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
 				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
 	}
+	if (!config->mprq.enabled)
+		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
 
@@ -800,6 +802,10 @@
 	bool vlan_inline;
 	unsigned int temp;
 
+	txq_ctrl->txq.fast_free =
+		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+		   !config->mprq.enabled);
 	if (config->txqs_inline == MLX5_ARG_UNSET)
 		txqs_inline =
 #if defined(RTE_ARCH_ARM64)
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [PATCH v3 0/2] add mbuf fast free offload support
  2021-01-22 17:12 ` [dpdk-dev] [PATCH v3 0/2] " Viacheslav Ovsiienko
  2021-01-22 17:12   ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
  2021-01-22 17:12   ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
@ 2021-01-26 10:34   ` Matan Azrad
  2021-01-27  9:31   ` Raslan Darawsheh
  3 siblings, 0 replies; 16+ messages in thread
From: Matan Azrad @ 2021-01-26 10:34 UTC (permalink / raw)
  To: Slava Ovsiienko, dev
  Cc: Raslan Darawsheh, Ori Kam, NBU-Contact-Thomas Monjalon,
	Alexander Kozyrev



 From: Viacheslav Ovsiienko
> This patch adds support of the mbuf fast free offload to the transmit datapath.
> This offload allows to free the mbufs on transmit completion in the most
> efficient way. It requires the all mbufs were allocated from the same pool,
> have the reference counter value as 1, and have no any externally attached
> buffers.
> 
> The patchset is split in two parts, the first one is overall send loop optimization
> and can be ported back to stable release, and also is the preparation step
> before introducing the fast free offload. The second part provides the code for
> the feature.
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> 
> ---
> v1:
> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatches.d
> pdk.org%2Fpatch%2F85482%2F&amp;data=04%7C01%7Cmatan%40nvidia.com
> %7Cd873758f2c354a1af0aa08d8bef8ddd9%7C43083d15727340c1b7db39efd9c
> cc17a%7C0%7C0%7C637469323804842896%7CUnknown%7CTWFpbGZsb3d8eyJ
> WIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C
> 1000&amp;sdata=BqgJn08GepCw1gpCfG7rKmqZcp1ht02HG%2Bi%2FlEXKjjc%3
> D&amp;reserved=0
> v2: release notes, PMD features and documentation update
> v3: typo fixes, documentation update
> 
> Viacheslav Ovsiienko (2):
>   net/mlx5: optimize inline mbuf freeing
>   net/mlx5: add mbuf fast free offload support
> 
>  doc/guides/nics/features/mlx5.ini      |  1 +
>  doc/guides/nics/mlx5.rst               | 12 ++++++
>  doc/guides/rel_notes/release_21_02.rst |  2 +-
>  drivers/net/mlx5/mlx5_rxtx.c           | 67 +++++++++++++++++++++++++++++---
> --
>  drivers/net/mlx5/mlx5_rxtx.h           |  2 +
>  drivers/net/mlx5/mlx5_txq.c            |  6 +++
>  6 files changed, 80 insertions(+), 10 deletions(-)
> 
> --
> 1.8.3.1
Series-acked-by: Matan Azrad <matan@nvidia.com>


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [PATCH v3 0/2] add mbuf fast free offload support
  2021-01-22 17:12 ` [dpdk-dev] [PATCH v3 0/2] " Viacheslav Ovsiienko
                     ` (2 preceding siblings ...)
  2021-01-26 10:34   ` [dpdk-dev] [PATCH v3 0/2] " Matan Azrad
@ 2021-01-27  9:31   ` Raslan Darawsheh
  3 siblings, 0 replies; 16+ messages in thread
From: Raslan Darawsheh @ 2021-01-27  9:31 UTC (permalink / raw)
  To: Slava Ovsiienko, dev
  Cc: Matan Azrad, Ori Kam, NBU-Contact-Thomas Monjalon, Alexander Kozyrev

Hi,

> -----Original Message-----
> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> Sent: Friday, January 22, 2021 7:12 PM
> To: dev@dpdk.org
> Cc: Raslan Darawsheh <rasland@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>; NBU-Contact-Thomas
> Monjalon <thomas@monjalon.net>; Alexander Kozyrev
> <akozyrev@nvidia.com>
> Subject: [PATCH v3 0/2] add mbuf fast free offload support
> 
> This patch adds support of the mbuf fast free offload to the
> transmit datapath. This offload allows to free the mbufs on
> transmit completion in the most efficient way. It requires
> the all mbufs were allocated from the same pool, have
> the reference counter value as 1, and have no any externally
> attached buffers.
> 
> The patchset is split in two parts, the first one is overall
> send loop optimization and can be ported back to stable release,
> and also is the preparation step before introducing the fast
> free offload. The second part provides the code for the feature.
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> 
> ---
> v1:
> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
> es.dpdk.org%2Fpatch%2F85482%2F&amp;data=04%7C01%7Crasland%40nvid
> ia.com%7Cb616de17445740f7ea7908d8bef8ddea%7C43083d15727340c1b7db
> 39efd9ccc17a%7C0%7C0%7C637469323379495159%7CUnknown%7CTWFpbG
> Zsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6
> Mn0%3D%7C1000&amp;sdata=5GosR%2BxQfWziqF3bWmOUfa%2FuTKuf%2
> BOkjdklt%2BJrnK%2FA%3D&amp;reserved=0
> v2: release notes, PMD features and documentation update
> v3: typo fixes, documentation update
> 
> Viacheslav Ovsiienko (2):
>   net/mlx5: optimize inline mbuf freeing
>   net/mlx5: add mbuf fast free offload support
> 
>  doc/guides/nics/features/mlx5.ini      |  1 +
>  doc/guides/nics/mlx5.rst               | 12 ++++++
>  doc/guides/rel_notes/release_21_02.rst |  2 +-
>  drivers/net/mlx5/mlx5_rxtx.c           | 67
> +++++++++++++++++++++++++++++-----
>  drivers/net/mlx5/mlx5_rxtx.h           |  2 +
>  drivers/net/mlx5/mlx5_txq.c            |  6 +++
>  6 files changed, 80 insertions(+), 10 deletions(-)
> 
> --
> 1.8.3.1

Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [dpdk-stable] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing
  2021-01-22 17:12   ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
@ 2021-01-27 12:44     ` Ferruh Yigit
  2021-01-27 12:48       ` Ferruh Yigit
  2021-01-28  9:14       ` Slava Ovsiienko
  0 siblings, 2 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-01-27 12:44 UTC (permalink / raw)
  To: Viacheslav Ovsiienko, dev; +Cc: rasland, matan, orika, thomas, akozyrev, stable

On 1/22/2021 5:12 PM, Viacheslav Ovsiienko wrote:
> The mlx5 PMD supports packet data inlining by pushing data
> to the transmit descriptor. If packet is short enough and all
> data are inline, the mbuf is not needed for data send anymore
> and can be freed.
> 
> The mbuf free was performed in the most inner loop building
> the transmit descriptors. This patch postpones the mbuf free
> transaction to the tx_burst routine exit, optimizing the loop
> and allowing the bulk freeing for the multiple mbufs in single
> pool API call.
> 
> Cc: stable@dpdk.org
> 

Hi Slava,

This patch is optimization for inline mbufs, right, it is not a fix, should it 
be backported?

cc'ed LTS maintainers.

I am dropping the stable to for now in the next-net, can add it later based on 
discussion result.

> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [dpdk-stable] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing
  2021-01-27 12:44     ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit
@ 2021-01-27 12:48       ` Ferruh Yigit
  2021-01-28  9:14       ` Slava Ovsiienko
  1 sibling, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-01-27 12:48 UTC (permalink / raw)
  To: Viacheslav Ovsiienko, dev
  Cc: rasland, matan, orika, thomas, akozyrev, stable, Kevin Traynor,
	Luca Boccassi

On 1/27/2021 12:44 PM, Ferruh Yigit wrote:
> On 1/22/2021 5:12 PM, Viacheslav Ovsiienko wrote:
>> The mlx5 PMD supports packet data inlining by pushing data
>> to the transmit descriptor. If packet is short enough and all
>> data are inline, the mbuf is not needed for data send anymore
>> and can be freed.
>>
>> The mbuf free was performed in the most inner loop building
>> the transmit descriptors. This patch postpones the mbuf free
>> transaction to the tx_burst routine exit, optimizing the loop
>> and allowing the bulk freeing for the multiple mbufs in single
>> pool API call.
>>
>> Cc: stable@dpdk.org
>>
> 
> Hi Slava,
> 
> This patch is optimization for inline mbufs, right, it is not a fix, should it 
> be backported?
> 
> cc'ed LTS maintainers.
> 

cc'ed now.

> I am dropping the stable to for now in the next-net, can add it later based on 
> discussion result.
> 
>> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> 


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [dpdk-stable] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing
  2021-01-27 12:44     ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit
  2021-01-27 12:48       ` Ferruh Yigit
@ 2021-01-28  9:14       ` Slava Ovsiienko
  2021-01-28  9:34         ` Thomas Monjalon
  1 sibling, 1 reply; 16+ messages in thread
From: Slava Ovsiienko @ 2021-01-28  9:14 UTC (permalink / raw)
  To: Ferruh Yigit, dev
  Cc: Raslan Darawsheh, Matan Azrad, Ori Kam,
	NBU-Contact-Thomas Monjalon, Alexander Kozyrev, stable

Hi, Ferruh

> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Wednesday, January 27, 2021 14:45
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; dev@dpdk.org
> Cc: Raslan Darawsheh <rasland@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>; NBU-Contact-Thomas
> Monjalon <thomas@monjalon.net>; Alexander Kozyrev
> <akozyrev@nvidia.com>; stable@dpdk.org
> Subject: Re: [dpdk-stable] [PATCH v3 1/2] net/mlx5: optimize inline mbuf
> freeing
> 
> On 1/22/2021 5:12 PM, Viacheslav Ovsiienko wrote:
> > The mlx5 PMD supports packet data inlining by pushing data to the
> > transmit descriptor. If packet is short enough and all data are
> > inline, the mbuf is not needed for data send anymore and can be freed.
> >
> > The mbuf free was performed in the most inner loop building the
> > transmit descriptors. This patch postpones the mbuf free transaction
> > to the tx_burst routine exit, optimizing the loop and allowing the
> > bulk freeing for the multiple mbufs in single pool API call.
> >
> > Cc: stable@dpdk.org
> >
> 
> Hi Slava,
> 
> This patch is optimization for inline mbufs, right, it is not a fix, should it be
> backported?
Not critical, but nice to have this small optimization in LTS.

> 
> cc'ed LTS maintainers.
> 
> I am dropping the stable to for now in the next-net, can add it later based on
> discussion result.

OK, let's consider this backporting in dedicated way, thank you.

With best regards, Slava

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [dpdk-stable] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing
  2021-01-28  9:14       ` Slava Ovsiienko
@ 2021-01-28  9:34         ` Thomas Monjalon
  0 siblings, 0 replies; 16+ messages in thread
From: Thomas Monjalon @ 2021-01-28  9:34 UTC (permalink / raw)
  To: Slava Ovsiienko
  Cc: Ferruh Yigit, dev, Raslan Darawsheh, Matan Azrad, Ori Kam,
	Alexander Kozyrev, stable, bluca, kevin.traynor,
	Christian Ehrhardt

28/01/2021 10:14, Slava Ovsiienko:
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> > On 1/22/2021 5:12 PM, Viacheslav Ovsiienko wrote:
> > > The mlx5 PMD supports packet data inlining by pushing data to the
> > > transmit descriptor. If packet is short enough and all data are
> > > inline, the mbuf is not needed for data send anymore and can be freed.
> > >
> > > The mbuf free was performed in the most inner loop building the
> > > transmit descriptors. This patch postpones the mbuf free transaction
> > > to the tx_burst routine exit, optimizing the loop and allowing the
> > > bulk freeing for the multiple mbufs in single pool API call.
> > >
> > > Cc: stable@dpdk.org
> > >
> > 
> > Hi Slava,
> > 
> > This patch is optimization for inline mbufs, right, it is not a fix, should it be
> > backported?
> Not critical, but nice to have this small optimization in LTS.
> 
> > 
> > cc'ed LTS maintainers.
> > 
> > I am dropping the stable to for now in the next-net, can add it later based on
> > discussion result.
> 
> OK, let's consider this backporting in dedicated way, thank you.

Consensus from techboard is to reject optimizations in LTS for now.
Some acceptance guidelines will be written soon.
Not sure this one will be considered.



^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2021-01-28  9:34 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-12-18 17:14 [dpdk-dev] [PATCH 0/2] net/mlx5: add mbuf fast free Tx offload Viacheslav Ovsiienko
2020-12-18 17:14 ` [dpdk-dev] [PATCH 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
2020-12-18 17:14 ` [dpdk-dev] [PATCH 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
2020-12-18 17:59   ` Thomas Monjalon
2021-01-06  8:34 ` [dpdk-dev] [PATCH v2 0/2] " Viacheslav Ovsiienko
2021-01-06  8:34   ` [dpdk-dev] [PATCH v2 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
2021-01-06  8:34   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
2021-01-22 17:12 ` [dpdk-dev] [PATCH v3 0/2] " Viacheslav Ovsiienko
2021-01-22 17:12   ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: optimize inline mbuf freeing Viacheslav Ovsiienko
2021-01-27 12:44     ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit
2021-01-27 12:48       ` Ferruh Yigit
2021-01-28  9:14       ` Slava Ovsiienko
2021-01-28  9:34         ` Thomas Monjalon
2021-01-22 17:12   ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: add mbuf fast free offload support Viacheslav Ovsiienko
2021-01-26 10:34   ` [dpdk-dev] [PATCH v3 0/2] " Matan Azrad
2021-01-27  9:31   ` Raslan Darawsheh

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ https://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git