DPDK patches and discussions
 help / color / mirror / Atom feed
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
	i.maximets@ovn.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH 2/5] net/vhost: move to Vhost library stats API
Date: Tue, 10 May 2022 22:17:17 +0200	[thread overview]
Message-ID: <20220510201720.1262368-3-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20220510201720.1262368-1-maxime.coquelin@redhat.com>

Now that we have Vhost statistics APIs, this patch replaces
the Vhost PMD extended statistics implementation with calls
to the new API. It will enable getting more statistics for
counters that cannot be implemented at the PMD level.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
 drivers/net/vhost/rte_eth_vhost.c | 348 +++++++++++-------------------
 1 file changed, 122 insertions(+), 226 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a248a65df4..8dee629fb0 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -59,33 +59,10 @@ static struct rte_ether_addr base_eth_addr = {
 	}
 };
 
-enum vhost_xstats_pkts {
-	VHOST_UNDERSIZE_PKT = 0,
-	VHOST_64_PKT,
-	VHOST_65_TO_127_PKT,
-	VHOST_128_TO_255_PKT,
-	VHOST_256_TO_511_PKT,
-	VHOST_512_TO_1023_PKT,
-	VHOST_1024_TO_1522_PKT,
-	VHOST_1523_TO_MAX_PKT,
-	VHOST_BROADCAST_PKT,
-	VHOST_MULTICAST_PKT,
-	VHOST_UNICAST_PKT,
-	VHOST_PKT,
-	VHOST_BYTE,
-	VHOST_MISSED_PKT,
-	VHOST_ERRORS_PKT,
-	VHOST_ERRORS_FRAGMENTED,
-	VHOST_ERRORS_JABBER,
-	VHOST_UNKNOWN_PROTOCOL,
-	VHOST_XSTATS_MAX,
-};
-
 struct vhost_stats {
 	uint64_t pkts;
 	uint64_t bytes;
 	uint64_t missed_pkts;
-	uint64_t xstats[VHOST_XSTATS_MAX];
 };
 
 struct vhost_queue {
@@ -140,138 +117,92 @@ struct rte_vhost_vring_state {
 
 static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
 
-#define VHOST_XSTATS_NAME_SIZE 64
-
-struct vhost_xstats_name_off {
-	char name[VHOST_XSTATS_NAME_SIZE];
-	uint64_t offset;
-};
-
-/* [rx]_is prepended to the name string here */
-static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
-	{"good_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
-	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
-	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
-	{"broadcast_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
-	{"multicast_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
-	{"unicast_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
-	 {"undersize_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
-	{"size_64_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
-	{"size_65_to_127_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
-	{"size_128_to_255_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
-	{"size_256_to_511_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
-	{"size_512_to_1023_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
-	{"size_1024_to_1522_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
-	{"size_1523_to_max_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
-	{"errors_with_bad_CRC",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
-	{"fragmented_errors",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
-	{"jabber_errors",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
-	{"unknown_protos_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
-};
-
-/* [tx]_ is prepended to the name string here */
-static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
-	{"good_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
-	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
-	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
-	{"broadcast_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
-	{"multicast_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
-	{"unicast_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
-	{"undersize_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
-	{"size_64_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
-	{"size_65_to_127_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
-	{"size_128_to_255_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
-	{"size_256_to_511_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
-	{"size_512_to_1023_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
-	{"size_1024_to_1522_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
-	{"size_1523_to_max_packets",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
-	{"errors_with_bad_CRC",
-	 offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
-};
-
-#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
-				sizeof(vhost_rxport_stat_strings[0]))
-
-#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
-				sizeof(vhost_txport_stat_strings[0]))
-
 static int
 vhost_dev_xstats_reset(struct rte_eth_dev *dev)
 {
-	struct vhost_queue *vq = NULL;
-	unsigned int i = 0;
+	struct vhost_queue *vq;
+	int ret, i;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		vq = dev->data->rx_queues[i];
-		if (!vq)
-			continue;
-		memset(&vq->stats, 0, sizeof(vq->stats));
+		ret = rte_vhost_vring_stats_reset(vq->vid, vq->virtqueue_id);
+		if (ret < 0)
+			return ret;
 	}
+
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		vq = dev->data->tx_queues[i];
-		if (!vq)
-			continue;
-		memset(&vq->stats, 0, sizeof(vq->stats));
+		ret = rte_vhost_vring_stats_reset(vq->vid, vq->virtqueue_id);
+		if (ret < 0)
+			return ret;
 	}
 
 	return 0;
 }
 
 static int
-vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
 			   struct rte_eth_xstat_name *xstats_names,
-			   unsigned int limit __rte_unused)
+			   unsigned int limit)
 {
-	unsigned int t = 0;
-	int count = 0;
-	int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+	struct rte_vhost_stat_name *name;
+	struct vhost_queue *vq;
+	int ret, i, count = 0, nstats = 0;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vq = dev->data->rx_queues[i];
+		ret = rte_vhost_vring_stats_get_names(vq->vid, vq->virtqueue_id, NULL, 0);
+		if (ret < 0)
+			return ret;
 
-	if (!xstats_names)
+		nstats += ret;
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vq = dev->data->tx_queues[i];
+		ret = rte_vhost_vring_stats_get_names(vq->vid, vq->virtqueue_id, NULL, 0);
+		if (ret < 0)
+			return ret;
+
+		nstats += ret;
+	}
+
+	if (!xstats_names || limit < (unsigned int)nstats)
 		return nstats;
-	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
-		snprintf(xstats_names[count].name,
-			 sizeof(xstats_names[count].name),
-			 "rx_%s", vhost_rxport_stat_strings[t].name);
-		count++;
-	}
-	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
-		snprintf(xstats_names[count].name,
-			 sizeof(xstats_names[count].name),
-			 "tx_%s", vhost_txport_stat_strings[t].name);
-		count++;
+
+	name = calloc(nstats, sizeof(*name));
+	if (!name)
+		return -1;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vq = dev->data->rx_queues[i];
+		ret = rte_vhost_vring_stats_get_names(vq->vid, vq->virtqueue_id,
+				name + count, nstats - count);
+		if (ret < 0) {
+			free(name);
+			return ret;
+		}
+
+		count += ret;
 	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vq = dev->data->tx_queues[i];
+		ret = rte_vhost_vring_stats_get_names(vq->vid, vq->virtqueue_id,
+				name + count, nstats - count);
+		if (ret < 0) {
+			free(name);
+			return ret;
+		}
+
+		count += ret;
+	}
+
+	for (i = 0; i < count; i++)
+		strncpy(xstats_names[i].name, name[i].name, RTE_ETH_XSTATS_NAME_SIZE);
+
+	free(name);
+
 	return count;
 }
 
@@ -279,86 +210,67 @@ static int
 vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		     unsigned int n)
 {
-	unsigned int i;
-	unsigned int t;
-	unsigned int count = 0;
-	struct vhost_queue *vq = NULL;
-	unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
-
-	if (n < nxstats)
-		return nxstats;
-
-	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
-		xstats[count].value = 0;
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			vq = dev->data->rx_queues[i];
-			if (!vq)
-				continue;
-			xstats[count].value +=
-				*(uint64_t *)(((char *)vq)
-				+ vhost_rxport_stat_strings[t].offset);
-		}
-		xstats[count].id = count;
-		count++;
+	struct rte_vhost_stat *stats;
+	struct vhost_queue *vq;
+	int ret, i, count = 0, nstats = 0;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vq = dev->data->rx_queues[i];
+		ret = rte_vhost_vring_stats_get(vq->vid, vq->virtqueue_id, NULL, 0);
+		if (ret < 0)
+			return ret;
+
+		nstats += ret;
 	}
-	for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
-		xstats[count].value = 0;
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
-			vq = dev->data->tx_queues[i];
-			if (!vq)
-				continue;
-			xstats[count].value +=
-				*(uint64_t *)(((char *)vq)
-				+ vhost_txport_stat_strings[t].offset);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vq = dev->data->tx_queues[i];
+		ret = rte_vhost_vring_stats_get(vq->vid, vq->virtqueue_id, NULL, 0);
+		if (ret < 0)
+			return ret;
+
+		nstats += ret;
+	}
+
+	if (!xstats || n < (unsigned int)nstats)
+		return nstats;
+
+	stats = calloc(nstats, sizeof(*stats));
+	if (!stats)
+		return -1;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vq = dev->data->rx_queues[i];
+		ret = rte_vhost_vring_stats_get(vq->vid, vq->virtqueue_id,
+				stats + count, nstats - count);
+		if (ret < 0) {
+			free(stats);
+			return ret;
 		}
-		xstats[count].id = count;
-		count++;
+
+		count += ret;
 	}
-	return count;
-}
 
-static inline void
-vhost_count_xcast_packets(struct vhost_queue *vq,
-				struct rte_mbuf *mbuf)
-{
-	struct rte_ether_addr *ea = NULL;
-	struct vhost_stats *pstats = &vq->stats;
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vq = dev->data->tx_queues[i];
+		ret = rte_vhost_vring_stats_get(vq->vid, vq->virtqueue_id,
+				stats + count, nstats - count);
+		if (ret < 0) {
+			free(stats);
+			return ret;
+		}
 
-	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
-	if (rte_is_multicast_ether_addr(ea)) {
-		if (rte_is_broadcast_ether_addr(ea))
-			pstats->xstats[VHOST_BROADCAST_PKT]++;
-		else
-			pstats->xstats[VHOST_MULTICAST_PKT]++;
-	} else {
-		pstats->xstats[VHOST_UNICAST_PKT]++;
+		count += ret;
 	}
-}
 
-static __rte_always_inline void
-vhost_update_single_packet_xstats(struct vhost_queue *vq, struct rte_mbuf *buf)
-{
-	uint32_t pkt_len = 0;
-	uint64_t index;
-	struct vhost_stats *pstats = &vq->stats;
-
-	pstats->xstats[VHOST_PKT]++;
-	pkt_len = buf->pkt_len;
-	if (pkt_len == 64) {
-		pstats->xstats[VHOST_64_PKT]++;
-	} else if (pkt_len > 64 && pkt_len < 1024) {
-		index = (sizeof(pkt_len) * 8)
-			- __builtin_clz(pkt_len) - 5;
-		pstats->xstats[index]++;
-	} else {
-		if (pkt_len < 64)
-			pstats->xstats[VHOST_UNDERSIZE_PKT]++;
-		else if (pkt_len <= 1522)
-			pstats->xstats[VHOST_1024_TO_1522_PKT]++;
-		else if (pkt_len > 1522)
-			pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
-	}
-	vhost_count_xcast_packets(vq, buf);
+	for (i = 0; i < count; i++) {
+		xstats[i].id = stats[i].id;
+		xstats[i].value = stats[i].value;
+	}
+
+	free(stats);
+
+	return nstats;
 }
 
 static uint16_t
@@ -402,9 +314,6 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			rte_vlan_strip(bufs[i]);
 
 		r->stats.bytes += bufs[i]->pkt_len;
-		r->stats.xstats[VHOST_BYTE] += bufs[i]->pkt_len;
-
-		vhost_update_single_packet_xstats(r, bufs[i]);
 	}
 
 out:
@@ -461,10 +370,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			break;
 	}
 
-	for (i = 0; likely(i < nb_tx); i++) {
+	for (i = 0; likely(i < nb_tx); i++)
 		nb_bytes += bufs[i]->pkt_len;
-		vhost_update_single_packet_xstats(r, bufs[i]);
-	}
 
 	nb_missed = nb_bufs - nb_tx;
 
@@ -472,17 +379,6 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	r->stats.bytes += nb_bytes;
 	r->stats.missed_pkts += nb_missed;
 
-	r->stats.xstats[VHOST_BYTE] += nb_bytes;
-	r->stats.xstats[VHOST_MISSED_PKT] += nb_missed;
-	r->stats.xstats[VHOST_UNICAST_PKT] += nb_missed;
-
-	/* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
-	 * ifHCOutBroadcastPkts counters are increased when packets are not
-	 * transmitted successfully.
-	 */
-	for (i = nb_tx; i < nb_bufs; i++)
-		vhost_count_xcast_packets(r, bufs[i]);
-
 	for (i = 0; likely(i < nb_tx); i++)
 		rte_pktmbuf_free(bufs[i]);
 out:
@@ -1566,7 +1462,7 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
 	int ret = 0;
 	char *iface_name;
 	uint16_t queues;
-	uint64_t flags = 0;
+	uint64_t flags = RTE_VHOST_USER_NET_STATS_ENABLE;
 	uint64_t disable_flags = 0;
 	int client_mode = 0;
 	int iommu_support = 0;
-- 
2.35.1


  parent reply	other threads:[~2022-05-10 20:17 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-05-10 20:17 [PATCH 0/5] vhost: introduce per-virtqueue " Maxime Coquelin
2022-05-10 20:17 ` [PATCH 1/5] vhost: add per-virtqueue statistics support Maxime Coquelin
2022-05-11 13:56   ` Xia, Chenbo
2022-05-10 20:17 ` Maxime Coquelin [this message]
2022-05-10 20:17 ` [PATCH 3/5] vhost: add statistics for guest notifications Maxime Coquelin
2022-05-10 20:17 ` [PATCH 4/5] vhost: add statistics for IOTLB Maxime Coquelin
2022-05-11 13:56   ` Xia, Chenbo
2022-05-10 20:17 ` [PATCH 5/5] vhost: add statistics for in-flight packets Maxime Coquelin
2022-05-11 13:57   ` Xia, Chenbo
2022-05-11  7:24 ` [PATCH 0/5] vhost: introduce per-virtqueue stats API Maxime Coquelin
2022-05-17 13:23 ` Maxime Coquelin
2022-05-17 13:33   ` Maxime Coquelin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220510201720.1262368-3-maxime.coquelin@redhat.com \
    --to=maxime.coquelin@redhat.com \
    --cc=chenbo.xia@intel.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=i.maximets@ovn.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).