From: David Christensen <drc@linux.vnet.ibm.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com,
	zhihong.wang@intel.com,  dev@dpdk.org
Cc: David Christensen <drc@linux.vnet.ibm.com>
Subject: [dpdk-dev] [PATCH] net/vhost: fix wrong xstats after clearing stats
Date: Wed,  2 Sep 2020 10:03:09 -0700	[thread overview]
Message-ID: <20200902170309.16513-1-drc@linux.vnet.ibm.com> (raw)

The PMD API allows stats and xstats values to be cleared separately.
This is a problem for the vhost PMD since some of the xstats values are
derived from existing stats values.  For example:

testpmd> show port xstats all
...
tx_unicast_packets: 17562959
...
testpmd> clear port stats all
...
testpmd> show port xstats all
...
tx_unicast_packets: 18446744073709551615
...

The unicast counters are derived on the fly from the basic stats
(e.g. tx unicast = pkts + missed_pkts - broadcast - multicast), so
clearing the basic stats alone makes the unsigned subtraction wrap
around to UINT64_MAX (18446744073709551615), as shown above.

Modify the driver so that stats and xstats values are stored, updated,
and cleared separately.

Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
---
 drivers/net/vhost/rte_eth_vhost.c | 54 ++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 22 deletions(-)
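
For reference, a minimal standalone sketch of the underflow (a
hypothetical illustration, not part of this patch; the variable names
are made up and only mirror the removed on-the-fly derivation):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pkts = 0;   /* basic stat, zeroed by "clear port stats" */
        uint64_t bcast = 1;  /* xstats counter, left untouched by the clear */
        uint64_t mcast = 0;  /* xstats counter, left untouched by the clear */

        /* 0 - 1 wraps around in unsigned arithmetic. */
        uint64_t unicast = pkts - (bcast + mcast);
        printf("tx_unicast_packets: %" PRIu64 "\n", unicast);
        return 0;
    }

This prints 18446744073709551615 (UINT64_MAX), matching the bad value
in the commit log above.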

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index e55278af6..4e72cc2ca 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
 	VHOST_BROADCAST_PKT,
 	VHOST_MULTICAST_PKT,
 	VHOST_UNICAST_PKT,
+	VHOST_PKT,
+	VHOST_BYTE,
+	VHOST_MISSED_PKT,
 	VHOST_ERRORS_PKT,
 	VHOST_ERRORS_FRAGMENTED,
 	VHOST_ERRORS_JABBER,
@@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
 /* [rx]_is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 /* [tx]_ is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -291,18 +294,11 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		vq = dev->data->rx_queues[i];
 		if (!vq)
 			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
 	}
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		vq = dev->data->tx_queues[i];
 		if (!vq)
 			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				+ vq->stats.missed_pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
 	}
 	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
 		xstats[count].value = 0;
@@ -346,20 +342,27 @@ vhost_count_multicast_broadcast(struct vhost_queue *vq,
 			pstats->xstats[VHOST_BROADCAST_PKT]++;
 		else
 			pstats->xstats[VHOST_MULTICAST_PKT]++;
+	} else {
+		pstats->xstats[VHOST_UNICAST_PKT]++;
 	}
 }
 
 static void
-vhost_update_packet_xstats(struct vhost_queue *vq,
-			   struct rte_mbuf **bufs,
-			   uint16_t count)
+vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
+			   uint16_t count, uint64_t nb_bytes,
+			   uint64_t nb_missed)
 {
 	uint32_t pkt_len = 0;
 	uint64_t i = 0;
 	uint64_t index;
 	struct vhost_stats *pstats = &vq->stats;
 
+	pstats->xstats[VHOST_BYTE] += nb_bytes;
+	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
+	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
+
 	for (i = 0; i < count ; i++) {
+		pstats->xstats[VHOST_PKT]++;
 		pkt_len = bufs[i]->pkt_len;
 		if (pkt_len == 64) {
 			pstats->xstats[VHOST_64_PKT]++;
@@ -385,6 +388,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_rx = 0;
 	uint16_t nb_receive = nb_bufs;
+	uint64_t nb_bytes = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -419,10 +423,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		if (r->internal->vlan_strip)
 			rte_vlan_strip(bufs[i]);
 
-		r->stats.bytes += bufs[i]->pkt_len;
+		nb_bytes += bufs[i]->pkt_len;
 	}
 
-	vhost_update_packet_xstats(r, bufs, nb_rx);
+	r->stats.bytes += nb_bytes;
+	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
 
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
@@ -436,6 +441,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_tx = 0;
 	uint16_t nb_send = 0;
+	uint64_t nb_bytes = 0;
+	uint64_t nb_missed = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -476,13 +483,16 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			break;
 	}
 
+	for (i = 0; likely(i < nb_tx); i++)
+		nb_bytes += bufs[i]->pkt_len;
+
+	nb_missed = nb_bufs - nb_tx;
+
 	r->stats.pkts += nb_tx;
+	r->stats.bytes += nb_bytes;
 	r->stats.missed_pkts += nb_bufs - nb_tx;
 
-	for (i = 0; likely(i < nb_tx); i++)
-		r->stats.bytes += bufs[i]->pkt_len;
-
-	vhost_update_packet_xstats(r, bufs, nb_tx);
+	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
 
 	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
 	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
-- 
2.18.4

