DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: fix trimming SW ring for vectorized Rx
@ 2017-08-03 21:12 Yongseok Koh
  2017-08-03 21:20 ` Thomas Monjalon
  0 siblings, 1 reply; 2+ messages in thread
From: Yongseok Koh @ 2017-08-03 21:12 UTC
  To: adrien.mazarguil, nelio.laranjeiro; +Cc: dev, Yongseok Koh

Unlike mlx5_rx_burst(), mlx5_rx_burst_vec() doesn't replace completed
buffers one by one right after each completion is processed; instead it
replenishes multiple buffers later with rte_mempool_get_bulk(). Therefore,
the SW ring (rxq->elts[]) can still hold addresses of buffers that have
already been delivered to the application. As the PMD doesn't own such
buffers, they must not be freed by the PMD. "Trimming" is needed before
cleanup.
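
As a rough illustration only (a toy model with invented names, not the
actual mlx5 datapath code or its real index scheme), the vectorized burst
hands mbufs to the application while leaving their addresses in the SW
ring; fresh mbufs overwrite those slots only when a bulk replenish runs
later:

    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Hypothetical, stripped-down Rx queue used only for this sketch. */
    struct toy_rxq {
        struct rte_mbuf *elts[256];   /* SW ring, power-of-two size */
        uint16_t next_deliver;        /* next slot handed to the app */
        uint16_t next_fill;           /* next slot a replenish rewrites */
        struct rte_mempool *mp;       /* pool a bulk replenish draws from */
    };

    /* Toy burst: deliver mbufs but leave their addresses in elts[].
     * Assumes n is no larger than the number of filled slots. */
    static uint16_t
    toy_rx_burst_vec(struct toy_rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
    {
        const uint16_t q_mask = RTE_DIM(rxq->elts) - 1;
        uint16_t i;

        for (i = 0; i < n; ++i)
            pkts[i] = rxq->elts[(rxq->next_deliver + i) & q_mask];
        rxq->next_deliver += n;
        /*
         * Replenishment is deferred: rte_mempool_get_bulk() later rewrites
         * a whole range of slots at once. Until then, the slots consumed
         * above still point at mbufs the application now owns.
         */
        return n;
    }

If the queue is torn down in this window, blindly freeing every entry of
elts[] would free mbufs the application still owns.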

The problem can be seen when quitting testpmd with
CONFIG_RTE_LIBRTE_MBUF_DEBUG=y and CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=y set.

Trimming should be as simple as possible: it shouldn't touch any indexes,
and buffer allocation isn't necessary.
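
Continuing the toy model above, and assuming the existing cleanup loop
already skips NULL entries (an assumption consistent with the diff below,
not something it shows in full), trimming reduces to writing NULL into the
slots whose mbufs the application owns, so cleanup only frees PMD-owned
buffers:

    /* Toy trim: forget addresses of mbufs already handed to the app. */
    static void
    toy_rxq_trim(struct toy_rxq *rxq)
    {
        const uint16_t q_n = RTE_DIM(rxq->elts);
        const uint16_t q_mask = q_n - 1;
        /* Slots awaiting refill, i.e. delivered but not yet replenished. */
        const uint16_t stale = q_n - (rxq->next_fill - rxq->next_deliver);
        uint16_t i;

        for (i = 0; i < stale; ++i)
            rxq->elts[(rxq->next_fill + i) & q_mask] = NULL;
    }

    /* Toy cleanup: with stale slots NULLed, only PMD-owned mbufs are freed. */
    static void
    toy_rxq_free_elts(struct toy_rxq *rxq)
    {
        uint16_t i;

        toy_rxq_trim(rxq);
        for (i = 0; i < RTE_DIM(rxq->elts); ++i) {
            if (rxq->elts[i] != NULL)
                rte_pktmbuf_free_seg(rxq->elts[i]);
            rxq->elts[i] = NULL;
        }
    }

No mempool access and no index updates are needed for the trim itself,
which is the "as simple as possible" point above.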

Fixes: 6cb559d67b83 ("net/mlx5: add vectorized Rx/Tx burst for x86")

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---

This patch should be applied on top of:
	net/mlx5: fix MTU update
which has been sent by Nelio.

 drivers/net/mlx5/mlx5_rxq.c | 31 ++++++++++---------------------
 1 file changed, 10 insertions(+), 21 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index a82c415cd..74387a797 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -642,11 +642,8 @@ priv_rehash_flows(struct priv *priv)
  *
  * @param rxq
  *   Pointer to RX queue structure.
- *
- * @return
- *   0 on success, errno value on failure.
  */
-static int
+static void
 rxq_trim_elts(struct rxq *rxq)
 {
 	const uint16_t q_n = (1 << rxq->elts_n);
@@ -655,17 +652,11 @@ rxq_trim_elts(struct rxq *rxq)
 	uint16_t i;
 
 	if (!rxq->trim_elts)
-		return 0;
-	for (i = 0; i < used; ++i) {
-		struct rte_mbuf *buf;
-		buf = rte_pktmbuf_alloc(rxq->mp);
-		if (!buf)
-			return ENOMEM;
-		(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = buf;
-	}
-	rxq->rq_pi = rxq->rq_ci;
+		return;
+	for (i = 0; i < used; ++i)
+		(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
 	rxq->trim_elts = 0;
-	return 0;
+	return;
 }
 
 /**
@@ -696,15 +687,13 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
 		volatile struct mlx5_wqe_data_seg *scat =
 			&(*rxq_ctrl->rxq.wqes)[i];
 
-		if (pool != NULL) {
-			buf = (*pool)[i];
-			assert(buf != NULL);
+		buf = (pool != NULL) ? (*pool)[i] : NULL;
+		if (buf != NULL) {
 			rte_pktmbuf_reset(buf);
 			rte_pktmbuf_refcnt_update(buf, 1);
 		} else
 			buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
 		if (buf == NULL) {
-			assert(pool == NULL);
 			ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
 			ret = ENOMEM;
 			goto error;
@@ -759,6 +748,7 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
 {
 	unsigned int i;
 
+	rxq_trim_elts(&rxq_ctrl->rxq);
 	DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
 	if (rxq_ctrl->rxq.elts == NULL)
 		return;
@@ -1078,9 +1068,8 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
 	if (rxq_ctrl->rxq.elts_n) {
 		assert(1 << rxq_ctrl->rxq.elts_n == desc);
 		assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
-		ret = rxq_trim_elts(&rxq_ctrl->rxq);
-		if (!ret)
-			ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
+		rxq_trim_elts(&rxq_ctrl->rxq);
+		ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
 	} else
 		ret = rxq_alloc_elts(&tmpl, desc, NULL);
 	if (ret) {
-- 
2.11.0
