From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>, Maxime Coquelin <maxime.coquelin@redhat.com>
Cc: <xuemingl@nvidia.com>
Subject: [PATCH v3 7/7] vdpa/mlx5: make statistics counter persistent
Date: Sun, 8 May 2022 17:25:54 +0300	[thread overview]
Message-ID: <20220508142554.560354-8-xuemingl@nvidia.com> (raw)
In-Reply-To: <20220508142554.560354-1-xuemingl@nvidia.com>

To speed up device suspend and resume, make the statistics counters
persistent across reconfigurations until the device is removed.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 doc/guides/vdpadevs/mlx5.rst        |  6 ++++++
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 19 +++++++----------
 drivers/vdpa/mlx5/mlx5_vdpa.h       |  1 +
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 32 +++++++++++------------------
 4 files changed, 26 insertions(+), 32 deletions(-)

diff --git a/doc/guides/vdpadevs/mlx5.rst b/doc/guides/vdpadevs/mlx5.rst
index acb791032ad..3ded142311e 100644
--- a/doc/guides/vdpadevs/mlx5.rst
+++ b/doc/guides/vdpadevs/mlx5.rst
@@ -109,3 +109,9 @@ Upon potential hardware errors, mlx5 PMD try to recover, give up if failed 3
 times in 3 seconds, virtq will be put in disable state. User should check log
 to get error information, or query vdpa statistics counter to know error type
 and count report.
+
+Statistics
+^^^^^^^^^^
+
+The device statistics counters persist across device reconfigurations until the
+device is removed. Users can reset the counters with rte_vdpa_reset_stats().
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index b1d5487080d..76fa5d4299e 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -388,12 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (priv->state == MLX5_VDPA_STATE_PROBED) {
-		DRV_LOG(ERR, "Device %s was not configured.",
-				vdev->device->name);
-		return -ENODATA;
-	}
-	if (qid >= (int)priv->nr_virtqs) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -416,12 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (priv->state == MLX5_VDPA_STATE_PROBED) {
-		DRV_LOG(ERR, "Device %s was not configured.",
-				vdev->device->name);
-		return -ENODATA;
-	}
-	if (qid >= (int)priv->nr_virtqs) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -695,6 +685,11 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
+	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+		if (!priv->virtqs[i].counters)
+			continue;
+		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
+	}
 	mlx5_vdpa_event_qp_global_release(priv);
 	mlx5_vdpa_err_event_unset(priv);
 	if (priv->steer.tbl)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 24bafe85b44..e7f3319f896 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -92,6 +92,7 @@ struct mlx5_vdpa_virtq {
 	struct rte_intr_handle *intr_handle;
 	uint64_t err_time[3]; /* RDTSC time of recent errors. */
 	uint32_t n_retry;
+	struct mlx5_devx_virtio_q_couners_attr stats;
 	struct mlx5_devx_virtio_q_couners_attr reset;
 };
 
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 0dfeb8fce24..e025be47d27 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -127,14 +127,9 @@ void
 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
 	int i;
-	struct mlx5_vdpa_virtq *virtq;
 
-	for (i = 0; i < priv->nr_virtqs; i++) {
-		virtq = &priv->virtqs[i];
-		mlx5_vdpa_virtq_unset(virtq);
-		if (virtq->counters)
-			claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
-	}
+	for (i = 0; i < priv->nr_virtqs; i++)
+		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
 	priv->features = 0;
 	priv->nr_virtqs = 0;
 }
@@ -590,7 +585,7 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
 			  struct rte_vdpa_stat *stats, unsigned int n)
 {
 	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
-	struct mlx5_devx_virtio_q_couners_attr attr = {0};
+	struct mlx5_devx_virtio_q_couners_attr *attr = &virtq->stats;
 	int ret;
 
 	if (!virtq->counters) {
@@ -598,7 +593,7 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
 			"is invalid.", qid);
 		return -EINVAL;
 	}
-	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
+	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, attr);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
 		return ret;
@@ -608,37 +603,37 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
 		return ret;
 	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
-		.value = attr.received_desc - virtq->reset.received_desc,
+		.value = attr->received_desc - virtq->reset.received_desc,
 	};
 	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
 		return ret;
 	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
-		.value = attr.completed_desc - virtq->reset.completed_desc,
+		.value = attr->completed_desc - virtq->reset.completed_desc,
 	};
 	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
 		return ret;
 	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
-		.value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
+		.value = attr->bad_desc_errors - virtq->reset.bad_desc_errors,
 	};
 	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
 		return ret;
 	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
-		.value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
+		.value = attr->exceed_max_chain - virtq->reset.exceed_max_chain,
 	};
 	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
 		return ret;
 	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
-		.value = attr.invalid_buffer - virtq->reset.invalid_buffer,
+		.value = attr->invalid_buffer - virtq->reset.invalid_buffer,
 	};
 	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
 		return ret;
 	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
-		.value = attr.error_cqes - virtq->reset.error_cqes,
+		.value = attr->error_cqes - virtq->reset.error_cqes,
 	};
 	return ret;
 }
@@ -649,11 +644,8 @@ mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
 	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
 	int ret;
 
-	if (!virtq->counters) {
-		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
-			"is invalid.", qid);
-		return -EINVAL;
-	}
+	if (virtq->counters == NULL) /* VQ not enabled. */
+		return 0;
 	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
 						    &virtq->reset);
 	if (ret)
-- 
2.35.1
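
For reference, the counters this patch makes persistent are exposed to
applications through the generic vdpa statistics API. The following is only a
minimal usage sketch, assuming the rte_vdpa_* calls declared in <rte_vdpa.h>;
the device name passed in and the array size of 16 are illustrative
placeholders, not part of the patch.

#include <inttypes.h>
#include <stdio.h>
#include <rte_vdpa.h>

/* Sketch: dump and reset the per-virtq counters of one vdpa device.
 * The counters keep accumulating across device reconfigurations;
 * rte_vdpa_reset_stats() restarts them from zero.
 */
static void
dump_and_reset_vq_stats(const char *vdev_name, uint16_t qid)
{
	struct rte_vdpa_device *dev;
	struct rte_vdpa_stat_name names[16];	/* illustrative size */
	struct rte_vdpa_stat stats[16];
	int n, i;

	dev = rte_vdpa_find_device_by_name(vdev_name);
	if (dev == NULL)
		return;
	/* Number of counters the driver exposes. */
	n = rte_vdpa_get_stats_names(dev, names, 16);
	if (n <= 0)
		return;
	if (n > 16)
		n = 16;
	/* Read the current values for this virtq. */
	n = rte_vdpa_get_stats(dev, qid, stats, n);
	for (i = 0; i < n; i++)
		printf("vq %d %s: %" PRIu64 "\n", qid,
		       names[stats[i].id].name, stats[i].value);
	/* Restart counting from zero for this virtq. */
	rte_vdpa_reset_stats(dev, qid);
}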



Thread overview: 43+ messages
2022-02-24 13:28 [PATCH 0/7] vdpa/mlx5: improve device shutdown time Xueming Li
2022-02-24 13:28 ` [PATCH 1/7] vdpa/mlx5: fix interrupt trash that leads to segment fault Xueming Li
2022-02-24 13:28 ` [PATCH 2/7] vdpa/mlx5: fix dead loop when process interrupted Xueming Li
2022-02-24 13:28 ` [PATCH 3/7] vdpa/mlx5: no kick handling during shutdown Xueming Li
2022-02-24 13:28 ` [PATCH 4/7] vdpa/mlx5: reuse resources in reconfiguration Xueming Li
2022-02-24 13:28 ` [PATCH 5/7] vdpa/mlx5: cache and reuse hardware resources Xueming Li
2022-02-24 13:28 ` [PATCH 6/7] vdpa/mlx5: support device cleanup callback Xueming Li
2022-02-24 13:28 ` [PATCH 7/7] vdpa/mlx5: make statistics counter persistent Xueming Li
2022-02-24 14:38 ` [PATCH v1 0/7] vdpa/mlx5: improve device shutdown time Xueming Li
2022-02-24 14:38   ` [PATCH v1 1/7] vdpa/mlx5: fix interrupt trash that leads to segment fault Xueming Li
2022-02-24 14:38   ` [PATCH v1 2/7] vdpa/mlx5: fix dead loop when process interrupted Xueming Li
2022-02-24 14:38   ` [PATCH v1 3/7] vdpa/mlx5: no kick handling during shutdown Xueming Li
2022-02-24 14:38   ` [PATCH v1 4/7] vdpa/mlx5: reuse resources in reconfiguration Xueming Li
2022-02-24 14:38   ` [PATCH v1 5/7] vdpa/mlx5: cache and reuse hardware resources Xueming Li
2022-02-24 14:38   ` [PATCH v1 6/7] vdpa/mlx5: support device cleanup callback Xueming Li
2022-02-24 14:38   ` [PATCH v1 7/7] vdpa/mlx5: make statistics counter persistent Xueming Li
2022-02-24 15:50 ` [PATCH v2 0/7] vdpa/mlx5: improve device shutdown time Xueming Li
2022-02-24 15:50   ` [PATCH v2 1/7] vdpa/mlx5: fix interrupt trash that leads to segment fault Xueming Li
2022-04-20 10:39     ` Maxime Coquelin
2022-02-24 15:50   ` [PATCH v2 2/7] vdpa/mlx5: fix dead loop when process interrupted Xueming Li
2022-04-20 10:33     ` Maxime Coquelin
2022-02-24 15:50   ` [PATCH v2 3/7] vdpa/mlx5: no kick handling during shutdown Xueming Li
2022-04-20 12:37     ` Maxime Coquelin
2022-04-20 13:23       ` Xueming(Steven) Li
2022-02-24 15:50   ` [PATCH v2 4/7] vdpa/mlx5: reuse resources in reconfiguration Xueming Li
2022-04-20 14:49     ` Maxime Coquelin
2022-02-24 15:50   ` [PATCH v2 5/7] vdpa/mlx5: cache and reuse hardware resources Xueming Li
2022-04-20 15:03     ` Maxime Coquelin
2022-04-25 13:28       ` Xueming(Steven) Li
2022-05-05 20:01         ` Maxime Coquelin
2022-02-24 15:51   ` [PATCH v2 6/7] vdpa/mlx5: support device cleanup callback Xueming Li
2022-04-21  8:19     ` Maxime Coquelin
2022-02-24 15:51   ` [PATCH v2 7/7] vdpa/mlx5: make statistics counter persistent Xueming Li
2022-04-21  8:22     ` Maxime Coquelin
2022-05-08 14:25 ` [PATCH v3 0/7] vdpa/mlx5: improve device shutdown time Xueming Li
2022-05-08 14:25   ` [PATCH v3 1/7] vdpa/mlx5: fix interrupt trash that leads to segment fault Xueming Li
2022-05-08 14:25   ` [PATCH v3 2/7] vdpa/mlx5: fix dead loop when process interrupted Xueming Li
2022-05-08 14:25   ` [PATCH v3 3/7] vdpa/mlx5: no kick handling during shutdown Xueming Li
2022-05-08 14:25   ` [PATCH v3 4/7] vdpa/mlx5: reuse resources in reconfiguration Xueming Li
2022-05-08 14:25   ` [PATCH v3 5/7] vdpa/mlx5: cache and reuse hardware resources Xueming Li
2022-05-08 14:25   ` [PATCH v3 6/7] vdpa/mlx5: support device cleanup callback Xueming Li
2022-05-08 14:25   ` Xueming Li [this message]
2022-05-09 19:38   ` [PATCH v3 0/7] vdpa/mlx5: improve device shutdown time Maxime Coquelin
