From: Raja Zidane <rzidane@nvidia.com>
To: <dev@dpdk.org>
Cc: <matan@nvidia.com>, <viacheslavo@nvidia.com>, <stable@dpdk.org>
Subject: [PATCH] net/mlx5: fix rxq/txq stats memory access sync
Date: Wed, 20 Apr 2022 18:32:17 +0300
Message-ID: <20220420153217.10723-1-rzidane@nvidia.com>
Queue statistics are updated continuously in the Rx/Tx burst
routines while handling traffic. In addition, a statistics reset
running in another thread writes zeroes to the same counters,
causing a race condition that can produce wrong statistics values.

Fix this with reference values: the live counters stay writable
by the Rx/Tx burst threads only, a statistics reset merely
snapshots them into the reference values, and statistics reads
report the difference between the two.
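
In outline (simplified to a single Rx counter; the diff below
applies the same scheme to all Rx/Tx counters):

	/* mlx5_stats_get(): report relative to the last reset. */
	tmp.ipackets += rxq->stats.ipackets - rxq->stats_reset.ipackets;

	/* mlx5_stats_reset(): snapshot instead of zeroing, so the
	 * burst threads remain the only writers of the live counters.
	 */
	rxq->stats_reset = rxq->stats;
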
Fixes: 87011737b715 ("mlx5: add software counters")
Cc: stable@dpdk.org
Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Acked-by: Slava Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5_rx.h | 1 +
drivers/net/mlx5/mlx5_stats.c | 40 +++++++++++++++++++++--------------
drivers/net/mlx5/mlx5_tx.h | 1 +
3 files changed, 26 insertions(+), 16 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 5bf88b6181..e715ed6b62 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -126,6 +126,7 @@ struct mlx5_rxq_data {
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
 	uint16_t idx; /* Queue index. */
 	struct mlx5_rxq_stats stats;
+	struct mlx5_rxq_stats stats_reset; /* stats on last reset. */
 	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
 	struct mlx5_uar_data uar_data; /* CQ doorbell. */
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 732775954a..f64fa3587b 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -114,18 +114,23 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		idx = rxq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
-			tmp.q_ipackets[idx] += rxq->stats.ipackets;
-			tmp.q_ibytes[idx] += rxq->stats.ibytes;
+			tmp.q_ipackets[idx] += rxq->stats.ipackets -
+						rxq->stats_reset.ipackets;
+			tmp.q_ibytes[idx] += rxq->stats.ibytes -
+						rxq->stats_reset.ibytes;
 #endif
 			tmp.q_errors[idx] += (rxq->stats.idropped +
-					      rxq->stats.rx_nombuf);
+					      rxq->stats.rx_nombuf) -
+					     (rxq->stats_reset.idropped +
+					      rxq->stats_reset.rx_nombuf);
 		}
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		tmp.ipackets += rxq->stats.ipackets;
-		tmp.ibytes += rxq->stats.ibytes;
+		tmp.ipackets += rxq->stats.ipackets - rxq->stats_reset.ipackets;
+		tmp.ibytes += rxq->stats.ibytes - rxq->stats_reset.ibytes;
 #endif
-		tmp.ierrors += rxq->stats.idropped;
-		tmp.rx_nombuf += rxq->stats.rx_nombuf;
+		tmp.ierrors += rxq->stats.idropped - rxq->stats_reset.idropped;
+		tmp.rx_nombuf += rxq->stats.rx_nombuf -
+				 rxq->stats_reset.rx_nombuf;
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
 		struct mlx5_txq_data *txq = (*priv->txqs)[i];
@@ -135,15 +140,17 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		idx = txq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
-			tmp.q_opackets[idx] += txq->stats.opackets;
-			tmp.q_obytes[idx] += txq->stats.obytes;
+			tmp.q_opackets[idx] += txq->stats.opackets -
+						txq->stats_reset.opackets;
+			tmp.q_obytes[idx] += txq->stats.obytes -
+						txq->stats_reset.obytes;
 #endif
 		}
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		tmp.opackets += txq->stats.opackets;
-		tmp.obytes += txq->stats.obytes;
+		tmp.opackets += txq->stats.opackets - txq->stats_reset.opackets;
+		tmp.obytes += txq->stats.obytes - txq->stats_reset.obytes;
 #endif
-		tmp.oerrors += txq->stats.oerrors;
+		tmp.oerrors += txq->stats.oerrors - txq->stats_reset.oerrors;
 	}
 	ret = mlx5_os_read_dev_stat(priv, "out_of_buffer", &tmp.imissed);
 	if (ret == 0) {
@@ -185,13 +192,14 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
 
 		if (rxq_data == NULL)
 			continue;
-		memset(&rxq_data->stats, 0, sizeof(struct mlx5_rxq_stats));
+		rxq_data->stats_reset = rxq_data->stats;
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
-		if ((*priv->txqs)[i] == NULL)
+		struct mlx5_txq_data *txq_data = (*priv->txqs)[i];
+
+		if (txq_data == NULL)
 			continue;
-		memset(&(*priv->txqs)[i]->stats, 0,
-		       sizeof(struct mlx5_txq_stats));
+		txq_data->stats_reset = txq_data->stats;
 	}
 	mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
 	stats_ctrl->imissed = 0;
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index dfa04612ff..20776919c2 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -164,6 +164,7 @@ struct mlx5_txq_data {
 	int32_t ts_offset; /* Timestamp field dynamic offset. */
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
+	struct mlx5_txq_stats stats_reset; /* stats on last reset. */
 	struct mlx5_uar_data uar_data;
 	struct rte_mbuf *elts[0];
 	/* Storage for queued packets, must be the last field. */
--
2.21.0