DPDK patches and discussions
* [PATCH] net/mlx5: fix rxq/txq stats memory access sync
@ 2022-04-20 15:32 Raja Zidane
  2022-04-26  9:51 ` Raslan Darawsheh
From: Raja Zidane @ 2022-04-20 15:32 UTC (permalink / raw)
  To: dev; +Cc: matan, viacheslavo, stable

Queue statistics are continuously updated in the Rx/Tx burst
routines while traffic is handled. At the same time, a statistics
reset in another thread writes zeroes over those same counters,
causing a race condition that can result in incorrect statistics
being reported.

The patch takes a reference-value approach instead: the actual
counters are written only by the Rx/Tx burst threads, a statistics
reset merely snapshots the current counter values into reference
fields, and statistics queries report the difference between the
counters and their references.
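
A minimal sketch of the reference-value idea, using hypothetical
struct and function names (q_stats, rx_burst_account, stats_reset,
stats_get) rather than the actual mlx5 ones:

    /* Hypothetical sketch of the reference-value pattern; not mlx5 code. */
    #include <stdint.h>

    struct q_stats {
            uint64_t packets;
            uint64_t bytes;
    };

    struct queue {
            struct q_stats stats;       /* Written only by the burst thread. */
            struct q_stats stats_reset; /* Snapshot taken on the last reset. */
    };

    /* Burst thread: the only writer of the live counters. */
    static void
    rx_burst_account(struct queue *q, uint64_t pkts, uint64_t bytes)
    {
            q->stats.packets += pkts;
            q->stats.bytes += bytes;
    }

    /* Control thread: reset no longer zeroes the live counters;
     * it only records their current values as the new baseline.
     */
    static void
    stats_reset(struct queue *q)
    {
            q->stats_reset = q->stats;
    }

    /* Control thread: a query reports the delta since the last reset. */
    static void
    stats_get(const struct queue *q, struct q_stats *out)
    {
            out->packets = q->stats.packets - q->stats_reset.packets;
            out->bytes = q->stats.bytes - q->stats_reset.bytes;
    }

Because the counters are unsigned, the subtraction stays well defined
even if a counter wraps past its reference value.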

Fixes: 87011737b715 ("mlx5: add software counters")
Cc: stable@dpdk.org

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Acked-by: Slava Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rx.h    |  1 +
 drivers/net/mlx5/mlx5_stats.c | 40 +++++++++++++++++++++--------------
 drivers/net/mlx5/mlx5_tx.h    |  1 +
 3 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 5bf88b6181..e715ed6b62 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -126,6 +126,7 @@ struct mlx5_rxq_data {
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
 	uint16_t idx; /* Queue index. */
 	struct mlx5_rxq_stats stats;
+	struct mlx5_rxq_stats stats_reset; /* Stats on last reset. */
 	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
 	struct mlx5_uar_data uar_data; /* CQ doorbell. */
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 732775954a..f64fa3587b 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -114,18 +114,23 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		idx = rxq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
-			tmp.q_ipackets[idx] += rxq->stats.ipackets;
-			tmp.q_ibytes[idx] += rxq->stats.ibytes;
+			tmp.q_ipackets[idx] += rxq->stats.ipackets -
+				rxq->stats_reset.ipackets;
+			tmp.q_ibytes[idx] += rxq->stats.ibytes -
+				rxq->stats_reset.ibytes;
 #endif
 			tmp.q_errors[idx] += (rxq->stats.idropped +
-					      rxq->stats.rx_nombuf);
+					      rxq->stats.rx_nombuf) -
+					      (rxq->stats_reset.idropped +
+					      rxq->stats_reset.rx_nombuf);
 		}
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		tmp.ipackets += rxq->stats.ipackets;
-		tmp.ibytes += rxq->stats.ibytes;
+		tmp.ipackets += rxq->stats.ipackets - rxq->stats_reset.ipackets;
+		tmp.ibytes += rxq->stats.ibytes - rxq->stats_reset.ibytes;
 #endif
-		tmp.ierrors += rxq->stats.idropped;
-		tmp.rx_nombuf += rxq->stats.rx_nombuf;
+		tmp.ierrors += rxq->stats.idropped - rxq->stats_reset.idropped;
+		tmp.rx_nombuf += rxq->stats.rx_nombuf -
+					rxq->stats_reset.rx_nombuf;
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
 		struct mlx5_txq_data *txq = (*priv->txqs)[i];
@@ -135,15 +140,17 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		idx = txq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
-			tmp.q_opackets[idx] += txq->stats.opackets;
-			tmp.q_obytes[idx] += txq->stats.obytes;
+			tmp.q_opackets[idx] += txq->stats.opackets -
+						txq->stats_reset.opackets;
+			tmp.q_obytes[idx] += txq->stats.obytes -
+						txq->stats_reset.obytes;
 #endif
 		}
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		tmp.opackets += txq->stats.opackets;
-		tmp.obytes += txq->stats.obytes;
+		tmp.opackets += txq->stats.opackets - txq->stats_reset.opackets;
+		tmp.obytes += txq->stats.obytes - txq->stats_reset.obytes;
 #endif
-		tmp.oerrors += txq->stats.oerrors;
+		tmp.oerrors += txq->stats.oerrors - txq->stats_reset.oerrors;
 	}
 	ret = mlx5_os_read_dev_stat(priv, "out_of_buffer", &tmp.imissed);
 	if (ret == 0) {
@@ -185,13 +192,14 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
 
 		if (rxq_data == NULL)
 			continue;
-		memset(&rxq_data->stats, 0, sizeof(struct mlx5_rxq_stats));
+		rxq_data->stats_reset = rxq_data->stats;
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
-		if ((*priv->txqs)[i] == NULL)
+		struct mlx5_txq_data *txq_data = (*priv->txqs)[i];
+
+		if (txq_data == NULL)
 			continue;
-		memset(&(*priv->txqs)[i]->stats, 0,
-		       sizeof(struct mlx5_txq_stats));
+		txq_data->stats_reset = txq_data->stats;
 	}
 	mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
 	stats_ctrl->imissed = 0;
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index dfa04612ff..20776919c2 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -164,6 +164,7 @@ struct mlx5_txq_data {
 	int32_t ts_offset; /* Timestamp field dynamic offset. */
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
+	struct mlx5_txq_stats stats_reset; /* Stats on last reset. */
 	struct mlx5_uar_data uar_data;
 	struct rte_mbuf *elts[0];
 	/* Storage for queued packets, must be the last field. */
-- 
2.21.0

