DPDK patches and discussions
From: Matan Azrad <matan@mellanox.com>
To: Viacheslav Ovsiienko <viacheslavo@mellanox.com>,
	Maxime Coquelin <maxime.coquelin@redhat.com>
Cc: dev@dpdk.org, Shahaf Shuler <shahafs@mellanox.com>
Subject: [dpdk-dev] [PATCH v3 4/4] examples/vdpa: add statistics show command
Date: Tue,  2 Jun 2020 15:47:49 +0000
Message-ID: <1591112869-78828-5-git-send-email-matan@mellanox.com>
In-Reply-To: <1591112869-78828-1-git-send-email-matan@mellanox.com>

A new vDPA driver feature was added to query the virtq statistics from
the HW.

Use this feature to show the HW queue statistics for the virtqs.

Command description: stats X Y.
X is the device ID.
Y is the queue ID; use Y=0xffff to show the statistics of all the
virtio queues of device X.
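
For example, a session showing the statistics of virtio queue 0 of
device 0 may look like this (the counter names are illustrative, the
actual set is reported by the vDPA driver):

    vdpa> stats 0 0

    Device 0:
            Virtq 0:
                    received_descriptors    1024
                    completed_descriptors   1024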

Signed-off-by: Matan Azrad <matan@mellanox.com>
---
 doc/guides/sample_app_ug/vdpa.rst |   3 +-
 examples/vdpa/main.c              | 119 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 121 insertions(+), 1 deletion(-)
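
Note (illustration, not part of the commit): below is a minimal,
self-contained sketch of the two-call query pattern the new "stats"
command uses -- first ask the driver how many counters it exposes, then
fetch names and values through a single allocation. The signatures
follow the integer device-id vDPA API this series is based on; the
helper name show_queue_stats() is hypothetical and error handling is
trimmed for brevity:

#include <stdio.h>
#include <inttypes.h>
#include <rte_malloc.h>
#include <rte_vdpa.h>

/* Query and print all HW counters of one virtq of device "did". */
static int
show_queue_stats(int did, uint16_t qid)
{
	struct rte_vdpa_stat_name *names;
	struct rte_vdpa_stat *stats;
	int n, i;

	/* A NULL buffer asks only for the number of counters. */
	n = rte_vdpa_get_stats_names(did, NULL, 0);
	if (n <= 0)
		return -1;
	/* One allocation holds both arrays: names first, values after. */
	names = rte_zmalloc(NULL, (sizeof(*names) + sizeof(*stats)) * n, 0);
	if (names == NULL)
		return -1;
	stats = (struct rte_vdpa_stat *)(names + n);
	if (rte_vdpa_get_stats_names(did, names, n) != n ||
	    rte_vdpa_get_stats(did, qid, stats, n) <= 0) {
		rte_free(names);
		return -1;
	}
	/* Each returned value carries the id of its name entry. */
	for (i = 0; i < n; i++)
		printf("\t%-*s %" PRIu64 "\n", RTE_VDPA_STATS_NAME_SIZE,
		       names[stats[i].id].name, stats[i].value);
	rte_free(names);
	return 0;
}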

diff --git a/doc/guides/sample_app_ug/vdpa.rst b/doc/guides/sample_app_ug/vdpa.rst
index 745f196..d66a724 100644
--- a/doc/guides/sample_app_ug/vdpa.rst
+++ b/doc/guides/sample_app_ug/vdpa.rst
@@ -44,7 +44,8 @@ where
   1. help: show help message
   2. list: list all available vdpa devices
   3. create: create a new vdpa port with socket file and vdpa device address
-  4. quit: unregister vhost driver and exit the application
+  4. stats: show statistics of virtio queues
+  5. quit: unregister vhost driver and exit the application
 
 Take IFCVF driver for example:
 
diff --git a/examples/vdpa/main.c b/examples/vdpa/main.c
index d9a9112..cdef715 100644
--- a/examples/vdpa/main.c
+++ b/examples/vdpa/main.c
@@ -18,6 +18,7 @@
 #include <cmdline_parse.h>
 #include <cmdline_socket.h>
 #include <cmdline_parse_string.h>
+#include <cmdline_parse_num.h>
 #include <cmdline.h>
 
 #define MAX_PATH_LEN 128
@@ -29,6 +30,9 @@ struct vdpa_port {
 	int did;
 	int vid;
 	uint64_t flags;
+	int stats_n;
+	struct rte_vdpa_stat_name *stats_names;
+	struct rte_vdpa_stat *stats;
 };
 
 static struct vdpa_port vports[MAX_VDPA_SAMPLE_PORTS];
@@ -199,6 +203,10 @@ struct vdpa_port {
 		RTE_LOG(ERR, VDPA,
 				"Fail to unregister vhost driver for %s.\n",
 				socket_path);
+	if (vport->stats_names) {
+		rte_free(vport->stats_names);
+		vport->stats_names = NULL;
+	}
 }
 
 static void
@@ -240,6 +248,7 @@ static void cmd_help_parsed(__rte_unused void *parsed_result,
 		"    help                                      : Show interactive instructions.\n"
 		"    list                                      : list all available vdpa devices.\n"
 		"    create <socket file> <vdev addr>          : create a new vdpa port.\n"
+		"    stats <device ID> <virtio queue ID>       : show statistics of virtio queue, 0xffff for all.\n"
 		"    quit                                      : exit vdpa sample app.\n"
 	);
 }
@@ -363,6 +372,115 @@ static void cmd_create_vdpa_port_parsed(void *parsed_result,
 	},
 };
 
+/* *** STATS *** */
+struct cmd_stats_result {
+	cmdline_fixed_string_t stats;
+	uint16_t did;
+	uint16_t qid;
+};
+
+static void cmd_device_stats_parsed(void *parsed_result, struct cmdline *cl,
+				    __rte_unused void *data)
+{
+	struct cmd_stats_result *res = parsed_result;
+	struct rte_vdpa_device *vdev = rte_vdpa_get_device(res->did);
+	struct vdpa_port *vport = NULL;
+	uint32_t first, last;
+	int i;
+
+	if (!vdev) {
+		RTE_LOG(ERR, VDPA, "Invalid device id %" PRIu16 ".\n",
+			res->did);
+		return;
+	}
+	for (i = 0; i < RTE_MIN(MAX_VDPA_SAMPLE_PORTS, dev_total); i++) {
+		if (vports[i].did == res->did) {
+			vport = &vports[i];
+			break;
+		}
+	}
+	if (!vport) {
+		RTE_LOG(ERR, VDPA, "Device id %" PRIu16 " was not created.\n",
+			res->did);
+		return;
+	}
+	if (res->qid == 0xFFFF) {
+		first = 0;
+		last = rte_vhost_get_vring_num(vport->vid);
+		if (last == 0) {
+			RTE_LOG(ERR, VDPA, "Failed to get the number of virtqs"
+				" for device id %d.\n", (int)res->did);
+			return;
+		}
+		last--;
+	} else {
+		first = res->qid;
+		last = res->qid;
+	}
+	if (!vport->stats_names) {
+		vport->stats_n = rte_vdpa_get_stats_names(vport->did, NULL, 0);
+		if (vport->stats_n <= 0) {
+			RTE_LOG(ERR, VDPA, "Failed to get the number of stat"
+				" names for device %d.\n", (int)res->did);
+			return;
+		}
+		vport->stats_names = rte_zmalloc(NULL,
+			(sizeof(*vport->stats_names) + sizeof(*vport->stats)) *
+							vport->stats_n, 0);
+		if (!vport->stats_names) {
+			RTE_LOG(ERR, VDPA, "Failed to allocate memory for stat"
+				" names of device %d.\n", (int)res->did);
+			return;
+		}
+		i = rte_vdpa_get_stats_names(vport->did, vport->stats_names,
+						vport->stats_n);
+		if (vport->stats_n != i) {
+			RTE_LOG(ERR, VDPA, "Failed to get stat names for"
+				" device %d.\n", (int)res->did);
+			return;
+		}
+		vport->stats = (struct rte_vdpa_stat *)
+					(vport->stats_names + vport->stats_n);
+	}
+	cmdline_printf(cl, "\nDevice %d:\n", (int)res->did);
+	for (; first <= last; first++) {
+		memset(vport->stats, 0, sizeof(*vport->stats) * vport->stats_n);
+		if (rte_vdpa_get_stats(vport->did, (int)first, vport->stats,
+					vport->stats_n) <= 0) {
+			RTE_LOG(ERR, VDPA, "Failed to get vdpa queue statistics"
+				" for device id %d qid %d.\n", (int)res->did,
+				(int)first);
+			return;
+		}
+		cmdline_printf(cl, "\tVirtq %" PRIu32 ":\n", first);
+		for (i = 0; i < vport->stats_n; ++i) {
+			cmdline_printf(cl, "\t\t%-*s %-16" PRIu64 "\n",
+				RTE_VDPA_STATS_NAME_SIZE,
+				vport->stats_names[vport->stats[i].id].name,
+				vport->stats[i].value);
+		}
+	}
+}
+
+cmdline_parse_token_string_t cmd_device_stats_ =
+	TOKEN_STRING_INITIALIZER(struct cmd_stats_result, stats, "stats");
+cmdline_parse_token_num_t cmd_device_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_stats_result, did, UINT16);
+cmdline_parse_token_num_t cmd_queue_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_stats_result, qid, UINT16);
+
+cmdline_parse_inst_t cmd_device_stats = {
+	.f = cmd_device_stats_parsed,
+	.data = NULL,
+	.help_str = "stats <device ID> <virtio queue ID>: show statistics of the virtio queue, 0xffff for all",
+	.tokens = {
+		(void *)&cmd_device_stats_,
+		(void *)&cmd_device_id,
+		(void *)&cmd_queue_id,
+		NULL,
+	},
+};
+
 /* *** QUIT *** */
 struct cmd_quit_result {
 	cmdline_fixed_string_t quit;
@@ -392,6 +510,7 @@ static void cmd_quit_parsed(__rte_unused void *parsed_result,
 	(cmdline_parse_inst_t *)&cmd_help,
 	(cmdline_parse_inst_t *)&cmd_list_vdpa_devices,
 	(cmdline_parse_inst_t *)&cmd_create_vdpa_port,
+	(cmdline_parse_inst_t *)&cmd_device_stats,
 	(cmdline_parse_inst_t *)&cmd_quit,
 	NULL,
 };
-- 
1.8.3.1


