DPDK patches and discussions
From: Matan Azrad <matan@mellanox.com>
To: dev@dpdk.org
Cc: Viacheslav Ovsiienko <viacheslavo@mellanox.com>,
	Shahaf Shuler <shahafs@mellanox.com>,
	Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v2 3/4] vdpa/mlx5: support virtio queue statistics get
Date: Tue,  5 May 2020 15:54:43 +0000	[thread overview]
Message-ID: <1588694084-381748-4-git-send-email-matan@mellanox.com> (raw)
In-Reply-To: <1588694084-381748-1-git-send-email-matan@mellanox.com>

Add support for the vDPA virtio queue statistics operations.

A DevX counter object is allocated per virtq in order to manage the
virtq statistics.

The counter object is allocated before the virtq is created and
destroyed after the virtq is destroyed, so the statistics are valid
only during the lifetime of the virtq.
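
For example, once the series is applied, an application that already
holds a vhost "vid" and a virtio queue index "qid" could read these
counters through the generic API introduced in the first patch of the
series. A minimal sketch, assuming the rte_vdpa_get_stats_names() and
rte_vdpa_get_stats() signatures from that patch (error handling and
the inttypes.h include for PRIu64 are omitted):

    int i, n, did = rte_vhost_get_vdpa_device_id(vid);
    struct rte_vdpa_stat_name names[16];
    struct rte_vdpa_stat stats[16];

    /* Query the driver stat names once, then the per-queue values. */
    n = rte_vdpa_get_stats_names(did, names, RTE_DIM(names));
    n = rte_vdpa_get_stats(did, qid, stats, n);
    for (i = 0; i < n; i++)
        printf("%s: %" PRIu64 "\n",
               names[stats[i].id].name, stats[i].value);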

Signed-off-by: Matan Azrad <matan@mellanox.com>
---
 doc/guides/vdpadevs/features/mlx5.ini |  1 +
 drivers/vdpa/mlx5/mlx5_vdpa.c         | 85 +++++++++++++++++++++++++++++++++
 drivers/vdpa/mlx5/mlx5_vdpa.h         | 47 ++++++++++++++++++++
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c   | 97 +++++++++++++++++++++++++++++++++++++
 4 files changed, 230 insertions(+)

diff --git a/doc/guides/vdpadevs/features/mlx5.ini b/doc/guides/vdpadevs/features/mlx5.ini
index 1da9c1b..788d4e0 100644
--- a/doc/guides/vdpadevs/features/mlx5.ini
+++ b/doc/guides/vdpadevs/features/mlx5.ini
@@ -17,6 +17,7 @@ packed               = Y
 proto mq             = Y
 proto log shmfd      = Y
 proto host notifier  = Y
+queue statistics     = Y
 Other kdrv           = Y
 ARMv8                = Y
 Power8               = Y
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 1113d6c..a80e3f4 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -8,6 +8,7 @@
 #include <rte_errno.h>
 #include <rte_bus_pci.h>
 #include <rte_pci.h>
+#include <rte_string_fns.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_common.h>
@@ -274,6 +275,85 @@
 	return 0;
 }
 
+static int
+mlx5_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
+			  unsigned int size)
+{
+	static const char *mlx5_vdpa_stats_names[MLX5_VDPA_STATS_MAX] = {
+		"received_descriptors",
+		"completed_descriptors",
+		"bad descriptor errors",
+		"exceed max chain",
+		"invalid buffer",
+		"completion errors",
+	};
+	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+	unsigned int i;
+
+	if (priv == NULL) {
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		return -ENODEV;
+	}
+	if (!stats_names)
+		return MLX5_VDPA_STATS_MAX;
+	size = RTE_MIN(size, (unsigned int)MLX5_VDPA_STATS_MAX);
+	for (i = 0; i < size; ++i)
+		strlcpy(stats_names[i].name, mlx5_vdpa_stats_names[i],
+			RTE_VDPA_STATS_NAME_SIZE);
+	return size;
+}
+
+static int
+mlx5_vdpa_get_stats(int did, int qid, struct rte_vdpa_stat *stats,
+		    unsigned int n)
+{
+	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+	if (priv == NULL) {
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		return -ENODEV;
+	}
+	if (!priv->configured) {
+		DRV_LOG(ERR, "Device %d was not configured.", did);
+		return -ENODATA;
+	}
+	if (qid < 0 || qid >= (int)priv->nr_virtqs) {
+		DRV_LOG(ERR, "Vring id %d is out of range.", qid);
+		return -E2BIG;
+	}
+	if (!priv->caps.queue_counters_valid) {
+		DRV_LOG(ERR, "Virtq statistics is not supported for device %d.",
+			did);
+		return -ENOTSUP;
+	}
+	return mlx5_vdpa_virtq_stats_get(priv, qid, stats, n);
+}
+
+static int
+mlx5_vdpa_reset_stats(int did, int qid)
+{
+	struct mlx5_vdpa_priv *priv = mlx5_vdpa_find_priv_resource_by_did(did);
+
+	if (priv == NULL) {
+		DRV_LOG(ERR, "Invalid device id: %d.", did);
+		return -ENODEV;
+	}
+	if (!priv->configured) {
+		DRV_LOG(ERR, "Device %d was not configured.", did);
+		return -ENODATA;
+	}
+	if (qid < 0 || qid >= (int)priv->nr_virtqs) {
+		DRV_LOG(ERR, "Vring id %d is out of range.", qid);
+		return -E2BIG;
+	}
+	if (!priv->caps.queue_counters_valid) {
+		DRV_LOG(ERR, "Virtq statistics is not supported for device %d.",
+			did);
+		return -ENOTSUP;
+	}
+	return mlx5_vdpa_virtq_stats_reset(priv, qid);
+}
+
 static struct rte_vdpa_dev_ops mlx5_vdpa_ops = {
 	.get_queue_num = mlx5_vdpa_get_queue_num,
 	.get_features = mlx5_vdpa_get_vdpa_features,
@@ -286,6 +366,9 @@
 	.get_vfio_group_fd = NULL,
 	.get_vfio_device_fd = mlx5_vdpa_get_device_fd,
 	.get_notify_area = mlx5_vdpa_get_notify_area,
+	.get_stats_names = mlx5_vdpa_get_stats_names,
+	.get_stats = mlx5_vdpa_get_stats,
+	.reset_stats = mlx5_vdpa_reset_stats,
 };
 
 static struct ibv_device *
@@ -489,6 +572,8 @@
 		rte_errno = ENOTSUP;
 		goto error;
 	}
+	if (!attr.vdpa.queue_counters_valid)
+		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
 			   attr.vdpa.max_num_virtio_queues * 2,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index fcc216a..80b4c4b 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -76,6 +76,7 @@ struct mlx5_vdpa_virtq {
 	uint16_t vq_size;
 	struct mlx5_vdpa_priv *priv;
 	struct mlx5_devx_obj *virtq;
+	struct mlx5_devx_obj *counters;
 	struct mlx5_vdpa_event_qp eqp;
 	struct {
 		struct mlx5dv_devx_umem *obj;
@@ -83,6 +84,8 @@ struct mlx5_vdpa_virtq {
 		uint32_t size;
 	} umems[3];
 	struct rte_intr_handle intr_handle;
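+	/* Snapshot of the HW counters at the last statistics reset. */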
+	struct mlx5_devx_virtio_q_couners_attr reset;
 };
 
 struct mlx5_vdpa_steer {
@@ -127,6 +130,17 @@ struct mlx5_vdpa_priv {
 	struct mlx5_vdpa_virtq virtqs[];
 };
 
+enum {
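+	/* Keep this order in sync with mlx5_vdpa_stats_names[]. */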
+	MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
+	MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
+	MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
+	MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
+	MLX5_VDPA_STATS_INVALID_BUFFER,
+	MLX5_VDPA_STATS_COMPLETION_ERRORS,
+	MLX5_VDPA_STATS_MAX
+};
+
 /*
  * Check whether virtq is for traffic receive.
  * According to VIRTIO_NET Spec the virtqueues index identity its type by:
@@ -352,4 +366,37 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
  */
 int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);
 
+/**
+ * Get virtq statistics.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] qid
+ *   The virtq index.
+ * @param stats
+ *   The virtq statistics array to fill.
+ * @param n
+ *   The number of elements in @p stats array.
+ *
+ * @return
+ *   A negative value on error, otherwise the number of entries filled in the
+ *   @p stats array.
+ */
+int
+mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+			  struct rte_vdpa_stat *stats, unsigned int n);
+
+/**
+ * Reset virtq statistics.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] qid
+ *   The virtq index.
+ *
+ * @return
+ *   A negative value on error, otherwise 0.
+ */
+int
+mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
 #endif /* RTE_PMD_MLX5_VDPA_H_ */
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index bd48460..d57ed59 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,6 +72,11 @@
 			rte_free(virtq->umems[i].buf);
 	}
 	memset(&virtq->umems, 0, sizeof(virtq->umems));
+	if (virtq->counters) {
+		claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
+		virtq->counters = NULL;
+	}
+	memset(&virtq->reset, 0, sizeof(virtq->reset));
 	if (virtq->eqp.fw_qp)
 		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
 	return 0;
@@ -205,6 +210,16 @@
 		DRV_LOG(INFO, "Virtq %d is, for sure, working by poll mode, no"
 			" need event QPs and event mechanism.", index);
 	}
+	if (priv->caps.queue_counters_valid) {
+		virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
+								    (priv->ctx);
+		if (!virtq->counters) {
+			DRV_LOG(ERR, "Failed to create virtq couners for virtq"
+				" %d.", index);
+			goto error;
+		}
+		attr.counters_obj_id = virtq->counters->id;
+	}
 	/* Setup 3 UMEMs for each virtq. */
 	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
@@ -455,3 +470,85 @@
 	}
 	return 0;
 }
+
+int
+mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
+			  struct rte_vdpa_stat *stats, unsigned int n)
+{
+	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
+	struct mlx5_devx_virtio_q_couners_attr attr = {0};
+	int ret;
+
+	if (!virtq->virtq) {
+		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
+			"is not configured.", qid);
+		return -EINVAL;
+	}
+	MLX5_ASSERT(virtq->counters);
+	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
+		return ret;
+	}
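+	/* Fill at most n entries; each value is the delta between the
+	 * current HW counter and the snapshot taken at the last reset.
+	 */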
+	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
+	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
+		return ret;
+	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
+		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
+		.value = attr.received_desc - virtq->reset.received_desc,
+	};
+	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
+		return ret;
+	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
+		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
+		.value = attr.completed_desc - virtq->reset.completed_desc,
+	};
+	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
+		return ret;
+	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
+		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
+		.value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
+	};
+	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
+		return ret;
+	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
+		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
+		.value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
+	};
+	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
+		return ret;
+	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
+		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
+		.value = attr.invalid_buffer - virtq->reset.invalid_buffer,
+	};
+	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
+		return ret;
+	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
+		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
+		.value = attr.error_cqes - virtq->reset.error_cqes,
+	};
+	return ret;
+}
+
+int
+mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
+{
+	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
+	int ret;
+
+	if (!virtq->virtq) {
+		DRV_LOG(ERR, "Failed to reset virtq %d statistics - virtq "
+			"is not configured.", qid);
+		return -EINVAL;
+	}
+	MLX5_ASSERT(virtq->counters);
+	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
+						    &virtq->reset);
+	if (ret)
+		DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
+			qid);
+	return ret;
+}
-- 
1.8.3.1


Thread overview: 32+ messages
2020-04-02 11:26 [dpdk-dev] [PATCH 0/4] vhost: support vDPA virtio queue statistics Matan Azrad
2020-04-02 11:26 ` [dpdk-dev] [PATCH 1/4] vhost: inroduce operation to get vDPA queue stats Matan Azrad
2020-04-15 14:36   ` Maxime Coquelin
2020-04-16  9:06     ` Matan Azrad
2020-04-16 13:19       ` Maxime Coquelin
2020-04-19  6:18         ` Shahaf Shuler
2020-04-20  7:13           ` Maxime Coquelin
2020-04-20 15:57             ` Shahaf Shuler
2020-04-20 16:18               ` Maxime Coquelin
2020-04-21  5:02                 ` Shahaf Shuler
2020-04-02 11:26 ` [dpdk-dev] [PATCH 2/4] common/mlx5: support DevX virtq stats operations Matan Azrad
2020-04-02 11:26 ` [dpdk-dev] [PATCH 3/4] vdpa/mlx5: support virtio queue statistics get Matan Azrad
2020-04-02 11:26 ` [dpdk-dev] [PATCH 4/4] examples/vdpa: add statistics show command Matan Azrad
2020-05-05 15:54 ` [dpdk-dev] [PATCH v2 0/4] vhost: support vDPA virtio queue statistics Matan Azrad
2020-05-05 15:54   ` [dpdk-dev] [PATCH v2 1/4] vhost: inroduce operation to get vDPA queue stats Matan Azrad
2020-05-05 15:54   ` [dpdk-dev] [PATCH v2 2/4] common/mlx5: support DevX virtq stats operations Matan Azrad
2020-05-05 15:54   ` Matan Azrad [this message]
2020-05-05 15:54   ` [dpdk-dev] [PATCH v2 4/4] examples/vdpa: add statistics show command Matan Azrad
2020-05-07 11:35   ` [dpdk-dev] [PATCH v2 0/4] vhost: support vDPA virtio queue statistics Matan Azrad
2020-06-02 15:47   ` [dpdk-dev] [PATCH v3 " Matan Azrad
2020-06-02 15:47     ` [dpdk-dev] [PATCH v3 1/4] vhost: inroduce operation to get vDPA queue stats Matan Azrad
2020-06-03  8:58       ` Maxime Coquelin
2020-06-04 10:36         ` Wang, Xiao W
2020-06-09  9:18           ` Maxime Coquelin
2020-06-02 15:47     ` [dpdk-dev] [PATCH v3 2/4] common/mlx5: support DevX virtq stats operations Matan Azrad
2020-06-18 10:58       ` Maxime Coquelin
2020-06-02 15:47     ` [dpdk-dev] [PATCH v3 3/4] vdpa/mlx5: support virtio queue statistics get Matan Azrad
2020-06-18 11:05       ` Maxime Coquelin
2020-06-02 15:47     ` [dpdk-dev] [PATCH v3 4/4] examples/vdpa: add statistics show command Matan Azrad
2020-06-18 12:13       ` Maxime Coquelin
2020-06-18 16:29     ` [dpdk-dev] [PATCH v3 0/4] vhost: support vDPA virtio queue statistics Maxime Coquelin
2020-06-19  6:01       ` Maxime Coquelin
