DPDK patches and discussions
* [dpdk-dev] [PATCH] net/vhost: fix xstats wrong after clearing stats
@ 2020-09-02 17:03 David Christensen
  2020-09-11  7:44 ` Xia, Chenbo
  2020-10-06 21:23 ` [dpdk-dev] [PATCH v2] " David Christensen
  0 siblings, 2 replies; 11+ messages in thread
From: David Christensen @ 2020-09-02 17:03 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia, zhihong.wang, dev; +Cc: David Christensen

The PMD API allows stats and xstats values to be cleared separately.
This is a problem for the vhost PMD since some of the xstats values are
derived from existing stats values.  For example:

testpmd> show port xstats all
...
tx_unicast_packets: 17562959
...
testpmd> clear port stats all
...
testpmd> show port xstats all
...
tx_unicast_packets: 18446744073709551615
...

Modify the driver so that stats and xstats values are stored, updated,
and cleared separately.
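
For reference, here is a minimal standalone C sketch of the unsigned
wraparound seen above; the values and names are illustrative, not the
PMD source:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pkts = 17562959;  /* stats.pkts, zeroed by "clear port stats all" */
	uint64_t bcast_mcast = 1;  /* xstats-only counters, NOT zeroed */

	pkts = 0;  /* generic stats reset */

	/* the pre-fix xstats_get derives unicast from the mixed counters */
	uint64_t unicast = pkts - bcast_mcast;
	printf("tx_unicast_packets: %" PRIu64 "\n", unicast);  /* 2^64 - 1 */
	return 0;
}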

Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
---
 drivers/net/vhost/rte_eth_vhost.c | 54 ++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index e55278af6..4e72cc2ca 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
 	VHOST_BROADCAST_PKT,
 	VHOST_MULTICAST_PKT,
 	VHOST_UNICAST_PKT,
+	VHOST_PKT,
+	VHOST_BYTE,
+	VHOST_MISSED_PKT,
 	VHOST_ERRORS_PKT,
 	VHOST_ERRORS_FRAGMENTED,
 	VHOST_ERRORS_JABBER,
@@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
 /* [rx]_is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 /* [tx]_ is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -291,18 +294,11 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		vq = dev->data->rx_queues[i];
 		if (!vq)
 			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
 	}
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		vq = dev->data->tx_queues[i];
 		if (!vq)
 			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				+ vq->stats.missed_pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
 	}
 	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
 		xstats[count].value = 0;
@@ -346,20 +342,27 @@ vhost_count_multicast_broadcast(struct vhost_queue *vq,
 			pstats->xstats[VHOST_BROADCAST_PKT]++;
 		else
 			pstats->xstats[VHOST_MULTICAST_PKT]++;
+	} else {
+		pstats->xstats[VHOST_UNICAST_PKT]++;
 	}
 }
 
 static void
-vhost_update_packet_xstats(struct vhost_queue *vq,
-			   struct rte_mbuf **bufs,
-			   uint16_t count)
+vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
+			   uint16_t count, uint64_t nb_bytes,
+			   uint64_t nb_missed)
 {
 	uint32_t pkt_len = 0;
 	uint64_t i = 0;
 	uint64_t index;
 	struct vhost_stats *pstats = &vq->stats;
 
+	pstats->xstats[VHOST_BYTE] += nb_bytes;
+	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
+	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
+
 	for (i = 0; i < count ; i++) {
+		pstats->xstats[VHOST_PKT]++;
 		pkt_len = bufs[i]->pkt_len;
 		if (pkt_len == 64) {
 			pstats->xstats[VHOST_64_PKT]++;
@@ -385,6 +388,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_rx = 0;
 	uint16_t nb_receive = nb_bufs;
+	uint64_t nb_bytes = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -419,10 +423,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		if (r->internal->vlan_strip)
 			rte_vlan_strip(bufs[i]);
 
-		r->stats.bytes += bufs[i]->pkt_len;
+		nb_bytes += bufs[i]->pkt_len;
 	}
 
-	vhost_update_packet_xstats(r, bufs, nb_rx);
+	r->stats.bytes += nb_bytes;
+	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
 
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
@@ -436,6 +441,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_tx = 0;
 	uint16_t nb_send = 0;
+	uint64_t nb_bytes = 0;
+	uint64_t nb_missed = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -476,13 +483,16 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			break;
 	}
 
+	for (i = 0; likely(i < nb_tx); i++)
+		nb_bytes += bufs[i]->pkt_len;
+
+	nb_missed = nb_bufs - nb_tx;
+
 	r->stats.pkts += nb_tx;
+	r->stats.bytes += nb_bytes;
 	r->stats.missed_pkts += nb_bufs - nb_tx;
 
-	for (i = 0; likely(i < nb_tx); i++)
-		r->stats.bytes += bufs[i]->pkt_len;
-
-	vhost_update_packet_xstats(r, bufs, nb_tx);
+	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
 
 	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
 	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
-- 
2.18.4



* Re: [dpdk-dev] [PATCH] net/vhost: fix xstats wrong after clearing stats
  2020-09-02 17:03 [dpdk-dev] [PATCH] net/vhost: fix xstats wrong after clearing stats David Christensen
@ 2020-09-11  7:44 ` Xia, Chenbo
  2020-09-23  8:07   ` Maxime Coquelin
  2020-10-06 21:23 ` [dpdk-dev] [PATCH v2] " David Christensen
  1 sibling, 1 reply; 11+ messages in thread
From: Xia, Chenbo @ 2020-09-11  7:44 UTC (permalink / raw)
  To: David Christensen, maxime.coquelin, Wang, Zhihong, dev

Hi David,

Thanks for working on this. Comments inline.

> -----Original Message-----
> From: David Christensen <drc@linux.vnet.ibm.com>
> Sent: Thursday, September 3, 2020 1:03 AM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>; Wang,
> Zhihong <zhihong.wang@intel.com>; dev@dpdk.org
> Cc: David Christensen <drc@linux.vnet.ibm.com>
> Subject: [PATCH] net/vhost: fix xstats wrong after clearing stats
> 
> The PMD API allows stats and xstats values to be cleared separately.
> This is a problem for the vhost PMD since some of the xstats values are
> derived from existing stats values.  For example:
> 
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 17562959
> ...
> testpmd> clear port stats all
> ...
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 18446744073709551615
> ...
> 
> Modify the driver so that stats and xstats values are stored, updated,
> and cleared separately.

I think it's a fix patch, so please add a 'Fixes:' tag and Cc stable@dpdk.org
in your commit message.

> 
> Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
> ---
>  drivers/net/vhost/rte_eth_vhost.c | 54 ++++++++++++++++++-------------
>  1 file changed, 32 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/net/vhost/rte_eth_vhost.c
> b/drivers/net/vhost/rte_eth_vhost.c
> index e55278af6..4e72cc2ca 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
>  	VHOST_BROADCAST_PKT,
>  	VHOST_MULTICAST_PKT,
>  	VHOST_UNICAST_PKT,
> +	VHOST_PKT,
> +	VHOST_BYTE,
> +	VHOST_MISSED_PKT,
>  	VHOST_ERRORS_PKT,
>  	VHOST_ERRORS_FRAGMENTED,
>  	VHOST_ERRORS_JABBER,
> @@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
>  /* [rx]_is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off
> vhost_rxport_stat_strings[] = {
>  /* [tx]_ is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -291,18 +294,11 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct
> rte_eth_xstat *xstats,
>  		vq = dev->data->rx_queues[i];
>  		if (!vq)
>  			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
>  	}

Why not delete the for loop here?

>  	for (i = 0; i < dev->data->nb_tx_queues; i++) {
>  		vq = dev->data->tx_queues[i];
>  		if (!vq)
>  			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				+ vq->stats.missed_pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
>  	}

Ditto.

>  	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
>  		xstats[count].value = 0;
> @@ -346,20 +342,27 @@ vhost_count_multicast_broadcast(struct vhost_queue
> *vq,
>  			pstats->xstats[VHOST_BROADCAST_PKT]++;
>  		else
>  			pstats->xstats[VHOST_MULTICAST_PKT]++;
> +	} else {
> +		pstats->xstats[VHOST_UNICAST_PKT]++;

Since this function now also counts unicast packets, its name should be
changed. Besides, in 'eth_vhost_tx', which calls this function, there's a
comment about why we call the function. I think that should also be updated.

Thanks!
Chenbo

>  	}
>  }
> 
>  static void
> -vhost_update_packet_xstats(struct vhost_queue *vq,
> -			   struct rte_mbuf **bufs,
> -			   uint16_t count)
> +vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
> +			   uint16_t count, uint64_t nb_bytes,
> +			   uint64_t nb_missed)
>  {
>  	uint32_t pkt_len = 0;
>  	uint64_t i = 0;
>  	uint64_t index;
>  	struct vhost_stats *pstats = &vq->stats;
> 
> +	pstats->xstats[VHOST_BYTE] += nb_bytes;
> +	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
> +	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
> +
>  	for (i = 0; i < count ; i++) {
> +		pstats->xstats[VHOST_PKT]++;
>  		pkt_len = bufs[i]->pkt_len;
>  		if (pkt_len == 64) {
>  			pstats->xstats[VHOST_64_PKT]++;
> @@ -385,6 +388,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t
> nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_rx = 0;
>  	uint16_t nb_receive = nb_bufs;
> +	uint64_t nb_bytes = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -419,10 +423,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs,
> uint16_t nb_bufs)
>  		if (r->internal->vlan_strip)
>  			rte_vlan_strip(bufs[i]);
> 
> -		r->stats.bytes += bufs[i]->pkt_len;
> +		nb_bytes += bufs[i]->pkt_len;
>  	}
> 
> -	vhost_update_packet_xstats(r, bufs, nb_rx);
> +	r->stats.bytes += nb_bytes;
> +	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
> 
>  out:
>  	rte_atomic32_set(&r->while_queuing, 0);
> @@ -436,6 +441,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t
> nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_tx = 0;
>  	uint16_t nb_send = 0;
> +	uint64_t nb_bytes = 0;
> +	uint64_t nb_missed = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -476,13 +483,16 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs,
> uint16_t nb_bufs)
>  			break;
>  	}
> 
> +	for (i = 0; likely(i < nb_tx); i++)
> +		nb_bytes += bufs[i]->pkt_len;
> +
> +	nb_missed = nb_bufs - nb_tx;
> +
>  	r->stats.pkts += nb_tx;
> +	r->stats.bytes += nb_bytes;
>  	r->stats.missed_pkts += nb_bufs - nb_tx;
> 
> -	for (i = 0; likely(i < nb_tx); i++)
> -		r->stats.bytes += bufs[i]->pkt_len;
> -
> -	vhost_update_packet_xstats(r, bufs, nb_tx);
> +	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
> 
>  	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
>  	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
> --
> 2.18.4



* Re: [dpdk-dev] [PATCH] net/vhost: fix xstats wrong after clearing stats
  2020-09-11  7:44 ` Xia, Chenbo
@ 2020-09-23  8:07   ` Maxime Coquelin
  2020-10-05 17:43     ` David Christensen
  0 siblings, 1 reply; 11+ messages in thread
From: Maxime Coquelin @ 2020-09-23  8:07 UTC (permalink / raw)
  To: Xia, Chenbo, David Christensen, Wang, Zhihong, dev

Hi David,

Could you please post a v2 with Chenbo's comments taken into account?

Thanks,
Maxime

On 9/11/20 9:44 AM, Xia, Chenbo wrote:
> Hi David,
> 
> Thanks for working on this. Comments inline.
> 
>> -----Original Message-----
>> From: David Christensen <drc@linux.vnet.ibm.com>
>> Sent: Thursday, September 3, 2020 1:03 AM
>> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>; Wang,
>> Zhihong <zhihong.wang@intel.com>; dev@dpdk.org
>> Cc: David Christensen <drc@linux.vnet.ibm.com>
>> Subject: [PATCH] net/vhost: fix xstats wrong after clearing stats
>>
>> The PMD API allows stats and xstats values to be cleared separately.
>> This is a problem for the vhost PMD since some of the xstats values are
>> derived from existing stats values.  For example:
>>
>> testpmd> show port xstats all
>> ...
>> tx_unicast_packets: 17562959
>> ...
>> testpmd> clear port stats all
>> ...
>> testpmd> show port xstats all
>> ...
>> tx_unicast_packets: 18446744073709551615
>> ...
>>
>> Modify the driver so that stats and xstats values are stored, updated,
>> and cleared separately.
> 
> I think it's a fix patch, so please add a 'Fixes:' tag and Cc stable@dpdk.org
> in your commit message.
> 
>>
>> Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
>> ---
>>  drivers/net/vhost/rte_eth_vhost.c | 54 ++++++++++++++++++-------------
>>  1 file changed, 32 insertions(+), 22 deletions(-)
>>
>> diff --git a/drivers/net/vhost/rte_eth_vhost.c
>> b/drivers/net/vhost/rte_eth_vhost.c
>> index e55278af6..4e72cc2ca 100644
>> --- a/drivers/net/vhost/rte_eth_vhost.c
>> +++ b/drivers/net/vhost/rte_eth_vhost.c
>> @@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
>>  	VHOST_BROADCAST_PKT,
>>  	VHOST_MULTICAST_PKT,
>>  	VHOST_UNICAST_PKT,
>> +	VHOST_PKT,
>> +	VHOST_BYTE,
>> +	VHOST_MISSED_PKT,
>>  	VHOST_ERRORS_PKT,
>>  	VHOST_ERRORS_FRAGMENTED,
>>  	VHOST_ERRORS_JABBER,
>> @@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
>>  /* [rx]_is prepended to the name string here */
>>  static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>>  	{"good_packets",
>> -	 offsetof(struct vhost_queue, stats.pkts)},
>> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>>  	{"total_bytes",
>> -	 offsetof(struct vhost_queue, stats.bytes)},
>> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>>  	{"missed_pkts",
>> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
>> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>>  	{"broadcast_packets",
>>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>>  	{"multicast_packets",
>> @@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off
>> vhost_rxport_stat_strings[] = {
>>  /* [tx]_ is prepended to the name string here */
>>  static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
>>  	{"good_packets",
>> -	 offsetof(struct vhost_queue, stats.pkts)},
>> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>>  	{"total_bytes",
>> -	 offsetof(struct vhost_queue, stats.bytes)},
>> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>>  	{"missed_pkts",
>> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
>> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>>  	{"broadcast_packets",
>>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>>  	{"multicast_packets",
>> @@ -291,18 +294,11 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct
>> rte_eth_xstat *xstats,
>>  		vq = dev->data->rx_queues[i];
>>  		if (!vq)
>>  			continue;
>> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
>> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
>> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
>>  	}
> 
> Why not delete the for loop here?
> 
>>  	for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>  		vq = dev->data->tx_queues[i];
>>  		if (!vq)
>>  			continue;
>> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
>> -				+ vq->stats.missed_pkts
>> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
>> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
>>  	}
> 
> Ditto.
> 
>>  	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
>>  		xstats[count].value = 0;
>> @@ -346,20 +342,27 @@ vhost_count_multicast_broadcast(struct vhost_queue
>> *vq,
>>  			pstats->xstats[VHOST_BROADCAST_PKT]++;
>>  		else
>>  			pstats->xstats[VHOST_MULTICAST_PKT]++;
>> +	} else {
>> +		pstats->xstats[VHOST_UNICAST_PKT]++;
> 
> Since this function now also counts unicast packets, its name should be
> changed. Besides, in 'eth_vhost_tx', which calls this function, there's a
> comment about why we call the function. I think that should also be updated.
> 
> Thanks!
> Chenbo
> 
>>  	}
>>  }
>>
>>  static void
>> -vhost_update_packet_xstats(struct vhost_queue *vq,
>> -			   struct rte_mbuf **bufs,
>> -			   uint16_t count)
>> +vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
>> +			   uint16_t count, uint64_t nb_bytes,
>> +			   uint64_t nb_missed)
>>  {
>>  	uint32_t pkt_len = 0;
>>  	uint64_t i = 0;
>>  	uint64_t index;
>>  	struct vhost_stats *pstats = &vq->stats;
>>
>> +	pstats->xstats[VHOST_BYTE] += nb_bytes;
>> +	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
>> +	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
>> +
>>  	for (i = 0; i < count ; i++) {
>> +		pstats->xstats[VHOST_PKT]++;
>>  		pkt_len = bufs[i]->pkt_len;
>>  		if (pkt_len == 64) {
>>  			pstats->xstats[VHOST_64_PKT]++;
>> @@ -385,6 +388,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t
>> nb_bufs)
>>  	struct vhost_queue *r = q;
>>  	uint16_t i, nb_rx = 0;
>>  	uint16_t nb_receive = nb_bufs;
>> +	uint64_t nb_bytes = 0;
>>
>>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>>  		return 0;
>> @@ -419,10 +423,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs,
>> uint16_t nb_bufs)
>>  		if (r->internal->vlan_strip)
>>  			rte_vlan_strip(bufs[i]);
>>
>> -		r->stats.bytes += bufs[i]->pkt_len;
>> +		nb_bytes += bufs[i]->pkt_len;
>>  	}
>>
>> -	vhost_update_packet_xstats(r, bufs, nb_rx);
>> +	r->stats.bytes += nb_bytes;
>> +	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
>>
>>  out:
>>  	rte_atomic32_set(&r->while_queuing, 0);
>> @@ -436,6 +441,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t
>> nb_bufs)
>>  	struct vhost_queue *r = q;
>>  	uint16_t i, nb_tx = 0;
>>  	uint16_t nb_send = 0;
>> +	uint64_t nb_bytes = 0;
>> +	uint64_t nb_missed = 0;
>>
>>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>>  		return 0;
>> @@ -476,13 +483,16 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs,
>> uint16_t nb_bufs)
>>  			break;
>>  	}
>>
>> +	for (i = 0; likely(i < nb_tx); i++)
>> +		nb_bytes += bufs[i]->pkt_len;
>> +
>> +	nb_missed = nb_bufs - nb_tx;
>> +
>>  	r->stats.pkts += nb_tx;
>> +	r->stats.bytes += nb_bytes;
>>  	r->stats.missed_pkts += nb_bufs - nb_tx;
>>
>> -	for (i = 0; likely(i < nb_tx); i++)
>> -		r->stats.bytes += bufs[i]->pkt_len;
>> -
>> -	vhost_update_packet_xstats(r, bufs, nb_tx);
>> +	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
>>
>>  	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
>>  	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
>> --
>> 2.18.4
> 



* Re: [dpdk-dev] [PATCH] net/vhost: fix xstats wrong after clearing stats
  2020-09-23  8:07   ` Maxime Coquelin
@ 2020-10-05 17:43     ` David Christensen
  2020-10-06  7:32       ` Maxime Coquelin
  0 siblings, 1 reply; 11+ messages in thread
From: David Christensen @ 2020-10-05 17:43 UTC (permalink / raw)
  To: Maxime Coquelin, Xia, Chenbo, Wang, Zhihong, dev



On 9/23/20 1:07 AM, Maxime Coquelin wrote:
> Hi David,
> 
> Could you please post a v2 with Chenbo's comments taken into account?

Sorry, been out of the office for a bit but now working through my 
backlog. I did make the change but discovered there were additional 
dependencies between stats and xstats that weren't addressed in the v1 
patch, specifically per-queue packet/byte counters:

$ diff before.txt after.txt
< tx_good_packets: 24769339
---
> tx_good_packets: 0
6c6
< tx_good_bytes: 1501282012967
---
> tx_good_bytes: 0
14,15c14,15
< tx_q0packets: 24769339
< tx_q0bytes: 1501282012967
---
> tx_q0packets: 0
> tx_q0bytes: 0
51c51
< tx_good_packets: 2653125
---
> tx_good_packets: 0
53c53
< tx_good_bytes: 167404266
---
> tx_good_bytes: 0
61,62c61,62
< tx_q0packets: 2653125
< tx_q0bytes: 167404266
---
> tx_q0packets: 0
> tx_q0bytes: 0

I'm looking at how to address these as well for a v2 patch soon.
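
A simplified sketch of that coupling, with illustrative names rather than
the driver's own: the per-queue counters and the good_packets/total_bytes
xstats are backed by the same fields, so a stats-only reset zeroes both
views at once:

#include <stdint.h>

struct vhost_stats_sketch {
	uint64_t pkts;      /* backs tx_q0packets AND tx_good_packets */
	uint64_t bytes;     /* backs tx_q0bytes   AND tx_good_bytes   */
	uint64_t xstats[8]; /* broadcast/multicast/size-bucket counters */
};

static void stats_reset_sketch(struct vhost_stats_sketch *s)
{
	/* a stats-only reset wipes the same fields the xstats table
	 * reads via offsetof(), hence the zeroed xstats in after.txt */
	s->pkts = 0;
	s->bytes = 0;
}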

Dave


* Re: [dpdk-dev] [PATCH] net/vhost: fix xstats wrong after clearing stats
  2020-10-05 17:43     ` David Christensen
@ 2020-10-06  7:32       ` Maxime Coquelin
  0 siblings, 0 replies; 11+ messages in thread
From: Maxime Coquelin @ 2020-10-06  7:32 UTC (permalink / raw)
  To: David Christensen, Xia, Chenbo, Wang, Zhihong, dev



On 10/5/20 7:43 PM, David Christensen wrote:
> 
> 
> On 9/23/20 1:07 AM, Maxime Coquelin wrote:
>> Hi David,
>>
>> Could you please post a v2 with Chenbo's comments taken into account?
> 
> Sorry, been out of the office for a bit but now working through my
> backlog. I did make the change but discovered there were additional
> dependencies between stats and xstats that weren't addressed in the v1
> patch, specifically per-queue packet/byte counters:
> 
> $ diff before.txt after.txt
> < tx_good_packets: 24769339
> ---
>> tx_good_packets: 0
> 6c6
> < tx_good_bytes: 1501282012967
> ---
>> tx_good_bytes: 0
> 14,15c14,15
> < tx_q0packets: 24769339
> < tx_q0bytes: 1501282012967
> ---
>> tx_q0packets: 0
>> tx_q0bytes: 0
> 51c51
> < tx_good_packets: 2653125
> ---
>> tx_good_packets: 0
> 53c53
> < tx_good_bytes: 167404266
> ---
>> tx_good_bytes: 0
> 61,62c61,62
> < tx_q0packets: 2653125
> < tx_q0bytes: 167404266
> ---
>> tx_q0packets: 0
>> tx_q0bytes: 0
> 
> I'm looking at how to address these as well for a v2 patch soon.

Thanks Dave!

> Dave
> 



* [dpdk-dev] [PATCH v2] net/vhost: fix xstats wrong after clearing stats
  2020-09-02 17:03 [dpdk-dev] [PATCH] net/vhost: fix xstats wrong after clearing stats David Christensen
  2020-09-11  7:44 ` Xia, Chenbo
@ 2020-10-06 21:23 ` David Christensen
  2020-10-09  3:13   ` Xia, Chenbo
  2020-10-15 17:49   ` [dpdk-dev] [PATCH v3] " David Christensen
  1 sibling, 2 replies; 11+ messages in thread
From: David Christensen @ 2020-10-06 21:23 UTC (permalink / raw)
  To: dev, maxime.coquelin, chenbo.xia, zhihong.wang
  Cc: stable, David Christensen, zhiyong.yang

The PMD API allows stats and xstats values to be cleared separately.
This is a problem for the vhost PMD since some of the xstats values are
derived from existing stats values.  For example:

testpmd> show port xstats all
...
tx_unicast_packets: 17562959
...
testpmd> clear port stats all
...
testpmd> show port xstats all
...
tx_unicast_packets: 18446744073709551615
...

Modify the driver so that stats and xstats values are stored, updated,
and cleared separately.

Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")
Cc: zhiyong.yang@intel.com

Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
---
v2:
* Removed newly unused vq loops
* Added "fixes" message
* Renamed vhost_count_multicast_broadcast to vhost_count_xcast_packets

 drivers/net/vhost/rte_eth_vhost.c | 70 +++++++++++++++----------------
 1 file changed, 35 insertions(+), 35 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index e55278af6..163cf9409 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
 	VHOST_BROADCAST_PKT,
 	VHOST_MULTICAST_PKT,
 	VHOST_UNICAST_PKT,
+	VHOST_PKT,
+	VHOST_BYTE,
+	VHOST_MISSED_PKT,
 	VHOST_ERRORS_PKT,
 	VHOST_ERRORS_FRAGMENTED,
 	VHOST_ERRORS_JABBER,
@@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
 /* [rx]_is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 /* [tx]_ is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -287,23 +290,6 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	if (n < nxstats)
 		return nxstats;
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		vq = dev->data->rx_queues[i];
-		if (!vq)
-			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
-	}
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		vq = dev->data->tx_queues[i];
-		if (!vq)
-			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				+ vq->stats.missed_pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
-	}
 	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
 		xstats[count].value = 0;
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -334,7 +320,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 }
 
 static inline void
-vhost_count_multicast_broadcast(struct vhost_queue *vq,
+vhost_count_xcast_packets(struct vhost_queue *vq,
 				struct rte_mbuf *mbuf)
 {
 	struct rte_ether_addr *ea = NULL;
@@ -346,20 +332,27 @@ vhost_count_multicast_broadcast(struct vhost_queue *vq,
 			pstats->xstats[VHOST_BROADCAST_PKT]++;
 		else
 			pstats->xstats[VHOST_MULTICAST_PKT]++;
+	} else {
+		pstats->xstats[VHOST_UNICAST_PKT]++;
 	}
 }
 
 static void
-vhost_update_packet_xstats(struct vhost_queue *vq,
-			   struct rte_mbuf **bufs,
-			   uint16_t count)
+vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
+			   uint16_t count, uint64_t nb_bytes,
+			   uint64_t nb_missed)
 {
 	uint32_t pkt_len = 0;
 	uint64_t i = 0;
 	uint64_t index;
 	struct vhost_stats *pstats = &vq->stats;
 
+	pstats->xstats[VHOST_BYTE] += nb_bytes;
+	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
+	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
+
 	for (i = 0; i < count ; i++) {
+		pstats->xstats[VHOST_PKT]++;
 		pkt_len = bufs[i]->pkt_len;
 		if (pkt_len == 64) {
 			pstats->xstats[VHOST_64_PKT]++;
@@ -375,7 +368,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq,
 			else if (pkt_len > 1522)
 				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
 		}
-		vhost_count_multicast_broadcast(vq, bufs[i]);
+		vhost_count_xcast_packets(vq, bufs[i]);
 	}
 }
 
@@ -385,6 +378,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_rx = 0;
 	uint16_t nb_receive = nb_bufs;
+	uint64_t nb_bytes = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -419,10 +413,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		if (r->internal->vlan_strip)
 			rte_vlan_strip(bufs[i]);
 
-		r->stats.bytes += bufs[i]->pkt_len;
+		nb_bytes += bufs[i]->pkt_len;
 	}
 
-	vhost_update_packet_xstats(r, bufs, nb_rx);
+	r->stats.bytes += nb_bytes;
+	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
 
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
@@ -436,6 +431,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_tx = 0;
 	uint16_t nb_send = 0;
+	uint64_t nb_bytes = 0;
+	uint64_t nb_missed = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -476,20 +473,23 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			break;
 	}
 
+	for (i = 0; likely(i < nb_tx); i++)
+		nb_bytes += bufs[i]->pkt_len;
+
+	nb_missed = nb_bufs - nb_tx;
+
 	r->stats.pkts += nb_tx;
+	r->stats.bytes += nb_bytes;
 	r->stats.missed_pkts += nb_bufs - nb_tx;
 
-	for (i = 0; likely(i < nb_tx); i++)
-		r->stats.bytes += bufs[i]->pkt_len;
-
-	vhost_update_packet_xstats(r, bufs, nb_tx);
+	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
 
 	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
 	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
 	 * are increased when packets are not transmitted successfully.
 	 */
 	for (i = nb_tx; i < nb_bufs; i++)
-		vhost_count_multicast_broadcast(r, bufs[i]);
+		vhost_count_xcast_packets(r, bufs[i]);
 
 	for (i = 0; likely(i < nb_tx); i++)
 		rte_pktmbuf_free(bufs[i]);
-- 
2.18.4



* Re: [dpdk-dev] [PATCH v2] net/vhost: fix xstats wrong after clearing stats
  2020-10-06 21:23 ` [dpdk-dev] [PATCH v2] " David Christensen
@ 2020-10-09  3:13   ` Xia, Chenbo
  2020-10-15 17:49   ` [dpdk-dev] [PATCH v3] " David Christensen
  1 sibling, 0 replies; 11+ messages in thread
From: Xia, Chenbo @ 2020-10-09  3:13 UTC (permalink / raw)
  To: David Christensen, dev, maxime.coquelin, Wang, Zhihong
  Cc: stable, Yang, Zhiyong

Hi David,

> -----Original Message-----
> From: David Christensen <drc@linux.vnet.ibm.com>
> Sent: Wednesday, October 7, 2020 5:23 AM
> To: dev@dpdk.org; maxime.coquelin@redhat.com; Xia, Chenbo
> <chenbo.xia@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>
> Cc: stable@dpdk.org; David Christensen <drc@linux.vnet.ibm.com>; Yang,
> Zhiyong <zhiyong.yang@intel.com>
> Subject: [PATCH v2] net/vhost: fix xstats wrong after clearing stats
> 
> The PMD API allows stats and xstats values to be cleared separately.
> This is a problem for the vhost PMD since some of the xstats values are
> derived from existing stats values.  For example:
> 
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 17562959
> ...
> testpmd> clear port stats all
> ...
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 18446744073709551615
> ...
> 
> Modify the driver so that stats and xstats values are stored, updated,
> and cleared separately.
> 
> Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")
> Cc: zhiyong.yang@intel.com

It's better to replace this Cc with 'Cc: stable@dpdk.org' as other fix patches
do. You can Cc Zhiyong with the git send-email command.

> 
> Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
> ---
> v2:
> * Removed newly unused vq loops
> * Added "fixes" message
> * Renamed vhost_count_multicast_broadcast to vhost_count_xcast_packets
> 
>  drivers/net/vhost/rte_eth_vhost.c | 70 +++++++++++++++----------------
>  1 file changed, 35 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/net/vhost/rte_eth_vhost.c
> b/drivers/net/vhost/rte_eth_vhost.c
> index e55278af6..163cf9409 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
>  	VHOST_BROADCAST_PKT,
>  	VHOST_MULTICAST_PKT,
>  	VHOST_UNICAST_PKT,
> +	VHOST_PKT,
> +	VHOST_BYTE,
> +	VHOST_MISSED_PKT,
>  	VHOST_ERRORS_PKT,
>  	VHOST_ERRORS_FRAGMENTED,
>  	VHOST_ERRORS_JABBER,
> @@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
>  /* [rx]_is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off
> vhost_rxport_stat_strings[] = {
>  /* [tx]_ is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -287,23 +290,6 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct
> rte_eth_xstat *xstats,
>  	if (n < nxstats)
>  		return nxstats;
> 
> -	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		vq = dev->data->rx_queues[i];
> -		if (!vq)
> -			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
> -	}
> -	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		vq = dev->data->tx_queues[i];
> -		if (!vq)
> -			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				+ vq->stats.missed_pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
> -	}
>  	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
>  		xstats[count].value = 0;
>  		for (i = 0; i < dev->data->nb_rx_queues; i++) {
> @@ -334,7 +320,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct
> rte_eth_xstat *xstats,
>  }
> 
>  static inline void
> -vhost_count_multicast_broadcast(struct vhost_queue *vq,
> +vhost_count_xcast_packets(struct vhost_queue *vq,
>  				struct rte_mbuf *mbuf)
>  {
>  	struct rte_ether_addr *ea = NULL;
> @@ -346,20 +332,27 @@ vhost_count_multicast_broadcast(struct vhost_queue
> *vq,
>  			pstats->xstats[VHOST_BROADCAST_PKT]++;
>  		else
>  			pstats->xstats[VHOST_MULTICAST_PKT]++;
> +	} else {
> +		pstats->xstats[VHOST_UNICAST_PKT]++;
>  	}
>  }
> 
>  static void
> -vhost_update_packet_xstats(struct vhost_queue *vq,
> -			   struct rte_mbuf **bufs,
> -			   uint16_t count)
> +vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
> +			   uint16_t count, uint64_t nb_bytes,
> +			   uint64_t nb_missed)
>  {
>  	uint32_t pkt_len = 0;
>  	uint64_t i = 0;
>  	uint64_t index;
>  	struct vhost_stats *pstats = &vq->stats;
> 
> +	pstats->xstats[VHOST_BYTE] += nb_bytes;
> +	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
> +	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
> +
>  	for (i = 0; i < count ; i++) {
> +		pstats->xstats[VHOST_PKT]++;
>  		pkt_len = bufs[i]->pkt_len;
>  		if (pkt_len == 64) {
>  			pstats->xstats[VHOST_64_PKT]++;
> @@ -375,7 +368,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq,
>  			else if (pkt_len > 1522)
>  				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
>  		}
> -		vhost_count_multicast_broadcast(vq, bufs[i]);
> +		vhost_count_xcast_packets(vq, bufs[i]);
>  	}
>  }
> 
> @@ -385,6 +378,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t
> nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_rx = 0;
>  	uint16_t nb_receive = nb_bufs;
> +	uint64_t nb_bytes = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -419,10 +413,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs,
> uint16_t nb_bufs)
>  		if (r->internal->vlan_strip)
>  			rte_vlan_strip(bufs[i]);
> 
> -		r->stats.bytes += bufs[i]->pkt_len;
> +		nb_bytes += bufs[i]->pkt_len;
>  	}
> 
> -	vhost_update_packet_xstats(r, bufs, nb_rx);
> +	r->stats.bytes += nb_bytes;
> +	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
> 
>  out:
>  	rte_atomic32_set(&r->while_queuing, 0);
> @@ -436,6 +431,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t
> nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_tx = 0;
>  	uint16_t nb_send = 0;
> +	uint64_t nb_bytes = 0;
> +	uint64_t nb_missed = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -476,20 +473,23 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs,
> uint16_t nb_bufs)
>  			break;
>  	}
> 
> +	for (i = 0; likely(i < nb_tx); i++)
> +		nb_bytes += bufs[i]->pkt_len;
> +
> +	nb_missed = nb_bufs - nb_tx;
> +
>  	r->stats.pkts += nb_tx;
> +	r->stats.bytes += nb_bytes;
>  	r->stats.missed_pkts += nb_bufs - nb_tx;
> 
> -	for (i = 0; likely(i < nb_tx); i++)
> -		r->stats.bytes += bufs[i]->pkt_len;
> -
> -	vhost_update_packet_xstats(r, bufs, nb_tx);
> +	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
> 
>  	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
>  	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
>  	 * are increased when packets are not transmitted successfully.
>  	 */

I think the above comment should be updated, because in the function below
we also update unicast packets (based on RFC2863).

Thanks!
Chenbo

>  	for (i = nb_tx; i < nb_bufs; i++)
> -		vhost_count_multicast_broadcast(r, bufs[i]);
> +		vhost_count_xcast_packets(r, bufs[i]);
> 
>  	for (i = 0; likely(i < nb_tx); i++)
>  		rte_pktmbuf_free(bufs[i]);
> --
> 2.18.4



* [dpdk-dev] [PATCH v3] net/vhost: fix xstats wrong after clearing stats
  2020-10-06 21:23 ` [dpdk-dev] [PATCH v2] " David Christensen
  2020-10-09  3:13   ` Xia, Chenbo
@ 2020-10-15 17:49   ` David Christensen
  2020-10-16  1:38     ` Xia, Chenbo
                       ` (2 more replies)
  1 sibling, 3 replies; 11+ messages in thread
From: David Christensen @ 2020-10-15 17:49 UTC (permalink / raw)
  To: dev, maxime.coquelin, chenbo.xia, zhihong.wang; +Cc: stable, David Christensen

The PMD API allows stats and xstats values to be cleared separately.
This is a problem for the vhost PMD since some of the xstats values are
derived from existing stats values.  For example:

testpmd> show port xstats all
...
tx_unicast_packets: 17562959
...
testpmd> clear port stats all
...
testpmd> show port xstats all
...
tx_unicast_packets: 18446744073709551615
...

Modify the driver so that stats and xstats values are stored, updated,
and cleared separately.

Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")
Cc: stable@dpdk.org

Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
---
v3:
* Modified comment that unicast packets include unsent packets
* Change Cc: to stable@dpdk.org
v2:
* Removed newly unused vq loops
* Added "fixes" message
* Renamed vhost_count_multicast_broadcast to vhost_count_xcast_packets

 drivers/net/vhost/rte_eth_vhost.c | 76 +++++++++++++++----------------
 1 file changed, 38 insertions(+), 38 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index e55278af6..886b3afe0 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
 	VHOST_BROADCAST_PKT,
 	VHOST_MULTICAST_PKT,
 	VHOST_UNICAST_PKT,
+	VHOST_PKT,
+	VHOST_BYTE,
+	VHOST_MISSED_PKT,
 	VHOST_ERRORS_PKT,
 	VHOST_ERRORS_FRAGMENTED,
 	VHOST_ERRORS_JABBER,
@@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
 /* [rx]_is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 /* [tx]_ is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -287,23 +290,6 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	if (n < nxstats)
 		return nxstats;
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		vq = dev->data->rx_queues[i];
-		if (!vq)
-			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
-	}
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		vq = dev->data->tx_queues[i];
-		if (!vq)
-			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				+ vq->stats.missed_pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
-	}
 	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
 		xstats[count].value = 0;
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -334,7 +320,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 }
 
 static inline void
-vhost_count_multicast_broadcast(struct vhost_queue *vq,
+vhost_count_xcast_packets(struct vhost_queue *vq,
 				struct rte_mbuf *mbuf)
 {
 	struct rte_ether_addr *ea = NULL;
@@ -346,20 +332,27 @@ vhost_count_multicast_broadcast(struct vhost_queue *vq,
 			pstats->xstats[VHOST_BROADCAST_PKT]++;
 		else
 			pstats->xstats[VHOST_MULTICAST_PKT]++;
+	} else {
+		pstats->xstats[VHOST_UNICAST_PKT]++;
 	}
 }
 
 static void
-vhost_update_packet_xstats(struct vhost_queue *vq,
-			   struct rte_mbuf **bufs,
-			   uint16_t count)
+vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
+			   uint16_t count, uint64_t nb_bytes,
+			   uint64_t nb_missed)
 {
 	uint32_t pkt_len = 0;
 	uint64_t i = 0;
 	uint64_t index;
 	struct vhost_stats *pstats = &vq->stats;
 
+	pstats->xstats[VHOST_BYTE] += nb_bytes;
+	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
+	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
+
 	for (i = 0; i < count ; i++) {
+		pstats->xstats[VHOST_PKT]++;
 		pkt_len = bufs[i]->pkt_len;
 		if (pkt_len == 64) {
 			pstats->xstats[VHOST_64_PKT]++;
@@ -375,7 +368,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq,
 			else if (pkt_len > 1522)
 				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
 		}
-		vhost_count_multicast_broadcast(vq, bufs[i]);
+		vhost_count_xcast_packets(vq, bufs[i]);
 	}
 }
 
@@ -385,6 +378,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_rx = 0;
 	uint16_t nb_receive = nb_bufs;
+	uint64_t nb_bytes = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -419,10 +413,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		if (r->internal->vlan_strip)
 			rte_vlan_strip(bufs[i]);
 
-		r->stats.bytes += bufs[i]->pkt_len;
+		nb_bytes += bufs[i]->pkt_len;
 	}
 
-	vhost_update_packet_xstats(r, bufs, nb_rx);
+	r->stats.bytes += nb_bytes;
+	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
 
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
@@ -436,6 +431,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_tx = 0;
 	uint16_t nb_send = 0;
+	uint64_t nb_bytes = 0;
+	uint64_t nb_missed = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -476,20 +473,23 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			break;
 	}
 
+	for (i = 0; likely(i < nb_tx); i++)
+		nb_bytes += bufs[i]->pkt_len;
+
+	nb_missed = nb_bufs - nb_tx;
+
 	r->stats.pkts += nb_tx;
+	r->stats.bytes += nb_bytes;
 	r->stats.missed_pkts += nb_bufs - nb_tx;
 
-	for (i = 0; likely(i < nb_tx); i++)
-		r->stats.bytes += bufs[i]->pkt_len;
-
-	vhost_update_packet_xstats(r, bufs, nb_tx);
+	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
 
-	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
-	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
-	 * are increased when packets are not transmitted successfully.
+	/* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
+	 * ifHCOutBroadcastPkts counters are increased when packets are not
+	 * transmitted successfully.
 	 */
 	for (i = nb_tx; i < nb_bufs; i++)
-		vhost_count_multicast_broadcast(r, bufs[i]);
+		vhost_count_xcast_packets(r, bufs[i]);
 
 	for (i = 0; likely(i < nb_tx); i++)
 		rte_pktmbuf_free(bufs[i]);
-- 
2.18.4



* Re: [dpdk-dev] [PATCH v3] net/vhost: fix xstats wrong after clearing stats
  2020-10-15 17:49   ` [dpdk-dev] [PATCH v3] " David Christensen
@ 2020-10-16  1:38     ` Xia, Chenbo
  2020-10-23 10:54     ` Maxime Coquelin
  2020-10-23 11:22     ` Maxime Coquelin
  2 siblings, 0 replies; 11+ messages in thread
From: Xia, Chenbo @ 2020-10-16  1:38 UTC (permalink / raw)
  To: David Christensen, dev, maxime.coquelin, Wang, Zhihong; +Cc: stable

> -----Original Message-----
> From: David Christensen <drc@linux.vnet.ibm.com>
> Sent: Friday, October 16, 2020 1:50 AM
> To: dev@dpdk.org; maxime.coquelin@redhat.com; Xia, Chenbo
> <chenbo.xia@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>
> Cc: stable@dpdk.org; David Christensen <drc@linux.vnet.ibm.com>
> Subject: [PATCH v3] net/vhost: fix xstats wrong after clearing stats
> 
> The PMD API allows stats and xstats values to be cleared separately.
> This is a problem for the vhost PMD since some of the xstats values are
> derived from existing stats values.  For example:
> 
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 17562959
> ...
> testpmd> clear port stats all
> ...
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 18446744073709551615
> ...
> 
> Modify the driver so that stats and xstats values are stored, updated,
> and cleared separately.
> 
> Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
> ---
> v3:
> * Modified comment that unicast packets include unsent packets
> * Change Cc: to stable@dpdk.org
> v2:
> * Removed newly unused vq loops
> * Added "fixes" message
> * Renamed vhost_count_multicast_broadcast to vhost_count_xcast_packets
> 
>  drivers/net/vhost/rte_eth_vhost.c | 76 +++++++++++++++----------------
>  1 file changed, 38 insertions(+), 38 deletions(-)
> 
> diff --git a/drivers/net/vhost/rte_eth_vhost.c
> b/drivers/net/vhost/rte_eth_vhost.c
> index e55278af6..886b3afe0 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
>  	VHOST_BROADCAST_PKT,
>  	VHOST_MULTICAST_PKT,
>  	VHOST_UNICAST_PKT,
> +	VHOST_PKT,
> +	VHOST_BYTE,
> +	VHOST_MISSED_PKT,
>  	VHOST_ERRORS_PKT,
>  	VHOST_ERRORS_FRAGMENTED,
>  	VHOST_ERRORS_JABBER,
> @@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
>  /* [rx]_is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off
> vhost_rxport_stat_strings[] = {
>  /* [tx]_ is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -287,23 +290,6 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct
> rte_eth_xstat *xstats,
>  	if (n < nxstats)
>  		return nxstats;
> 
> -	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		vq = dev->data->rx_queues[i];
> -		if (!vq)
> -			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
> -	}
> -	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		vq = dev->data->tx_queues[i];
> -		if (!vq)
> -			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				+ vq->stats.missed_pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
> -	}
>  	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
>  		xstats[count].value = 0;
>  		for (i = 0; i < dev->data->nb_rx_queues; i++) {
> @@ -334,7 +320,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct
> rte_eth_xstat *xstats,
>  }
> 
>  static inline void
> -vhost_count_multicast_broadcast(struct vhost_queue *vq,
> +vhost_count_xcast_packets(struct vhost_queue *vq,
>  				struct rte_mbuf *mbuf)
>  {
>  	struct rte_ether_addr *ea = NULL;
> @@ -346,20 +332,27 @@ vhost_count_multicast_broadcast(struct vhost_queue
> *vq,
>  			pstats->xstats[VHOST_BROADCAST_PKT]++;
>  		else
>  			pstats->xstats[VHOST_MULTICAST_PKT]++;
> +	} else {
> +		pstats->xstats[VHOST_UNICAST_PKT]++;
>  	}
>  }
> 
>  static void
> -vhost_update_packet_xstats(struct vhost_queue *vq,
> -			   struct rte_mbuf **bufs,
> -			   uint16_t count)
> +vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
> +			   uint16_t count, uint64_t nb_bytes,
> +			   uint64_t nb_missed)
>  {
>  	uint32_t pkt_len = 0;
>  	uint64_t i = 0;
>  	uint64_t index;
>  	struct vhost_stats *pstats = &vq->stats;
> 
> +	pstats->xstats[VHOST_BYTE] += nb_bytes;
> +	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
> +	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
> +
>  	for (i = 0; i < count ; i++) {
> +		pstats->xstats[VHOST_PKT]++;
>  		pkt_len = bufs[i]->pkt_len;
>  		if (pkt_len == 64) {
>  			pstats->xstats[VHOST_64_PKT]++;
> @@ -375,7 +368,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq,
>  			else if (pkt_len > 1522)
>  				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
>  		}
> -		vhost_count_multicast_broadcast(vq, bufs[i]);
> +		vhost_count_xcast_packets(vq, bufs[i]);
>  	}
>  }
> 
> @@ -385,6 +378,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t
> nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_rx = 0;
>  	uint16_t nb_receive = nb_bufs;
> +	uint64_t nb_bytes = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -419,10 +413,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs,
> uint16_t nb_bufs)
>  		if (r->internal->vlan_strip)
>  			rte_vlan_strip(bufs[i]);
> 
> -		r->stats.bytes += bufs[i]->pkt_len;
> +		nb_bytes += bufs[i]->pkt_len;
>  	}
> 
> -	vhost_update_packet_xstats(r, bufs, nb_rx);
> +	r->stats.bytes += nb_bytes;
> +	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
> 
>  out:
>  	rte_atomic32_set(&r->while_queuing, 0);
> @@ -436,6 +431,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t
> nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_tx = 0;
>  	uint16_t nb_send = 0;
> +	uint64_t nb_bytes = 0;
> +	uint64_t nb_missed = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -476,20 +473,23 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs,
> uint16_t nb_bufs)
>  			break;
>  	}
> 
> +	for (i = 0; likely(i < nb_tx); i++)
> +		nb_bytes += bufs[i]->pkt_len;
> +
> +	nb_missed = nb_bufs - nb_tx;
> +
>  	r->stats.pkts += nb_tx;
> +	r->stats.bytes += nb_bytes;
>  	r->stats.missed_pkts += nb_bufs - nb_tx;
> 
> -	for (i = 0; likely(i < nb_tx); i++)
> -		r->stats.bytes += bufs[i]->pkt_len;
> -
> -	vhost_update_packet_xstats(r, bufs, nb_tx);
> +	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
> 
> -	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
> -	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
> -	 * are increased when packets are not transmitted successfully.
> +	/* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
> +	 * ifHCOutBroadcastPkts counters are increased when packets are not
> +	 * transmitted successfully.
>  	 */
>  	for (i = nb_tx; i < nb_bufs; i++)
> -		vhost_count_multicast_broadcast(r, bufs[i]);
> +		vhost_count_xcast_packets(r, bufs[i]);
> 
>  	for (i = 0; likely(i < nb_tx); i++)
>  		rte_pktmbuf_free(bufs[i]);
> --
> 2.18.4

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>


* Re: [dpdk-dev] [PATCH v3] net/vhost: fix xstats wrong after clearing stats
  2020-10-15 17:49   ` [dpdk-dev] [PATCH v3] " David Christensen
  2020-10-16  1:38     ` Xia, Chenbo
@ 2020-10-23 10:54     ` Maxime Coquelin
  2020-10-23 11:22     ` Maxime Coquelin
  2 siblings, 0 replies; 11+ messages in thread
From: Maxime Coquelin @ 2020-10-23 10:54 UTC (permalink / raw)
  To: David Christensen, dev, chenbo.xia, zhihong.wang; +Cc: stable



On 10/15/20 7:49 PM, David Christensen wrote:
> The PMD API allows stats and xstats values to be cleared separately.
> This is a problem for the vhost PMD since some of the xstats values are
> derived from existing stats values.  For example:
> 
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 17562959
> ...
> testpmd> clear port stats all
> ...
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 18446744073709551615
> ...
> 
> Modify the driver so that stats and xstats values are stored, updated,
> and cleared separately.
> 
> Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
> ---
> v3:
> * Modified comment that unicast packets include unsent packets
> * Change Cc: to stable@dpdk.org
> v2:
> * Removed newly unused vq loops
> * Added "fixes" message
> * Renamed vhost_count_multicast_broadcast to vhost_count_xcast_packets
> 
>  drivers/net/vhost/rte_eth_vhost.c | 76 +++++++++++++++----------------
>  1 file changed, 38 insertions(+), 38 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime



* Re: [dpdk-dev] [PATCH v3] net/vhost: fix xstats wrong after clearing stats
  2020-10-15 17:49   ` [dpdk-dev] [PATCH v3] " David Christensen
  2020-10-16  1:38     ` Xia, Chenbo
  2020-10-23 10:54     ` Maxime Coquelin
@ 2020-10-23 11:22     ` Maxime Coquelin
  2 siblings, 0 replies; 11+ messages in thread
From: Maxime Coquelin @ 2020-10-23 11:22 UTC (permalink / raw)
  To: David Christensen, dev, chenbo.xia, zhihong.wang; +Cc: stable



On 10/15/20 7:49 PM, David Christensen wrote:
> The PMD API allows stats and xstats values to be cleared separately.
> This is a problem for the vhost PMD since some of the xstats values are
> derived from existing stats values.  For example:
> 
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 17562959
> ...
> testpmd> clear port stats all
> ...
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 18446744073709551615
> ...
> 
> Modify the driver so that stats and xstats values are stored, updated,
> and cleared separately.
> 
> Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
> ---
> v3:
> * Modified comment that unicast packets include unsent packets
> * Change Cc: to stable@dpdk.org
> v2:
> * Removed newly unused vq loops
> * Added "fixes" message
> * Renamed vhost_count_multicast_broadcast to vhost_count_xcast_packets
> 
>  drivers/net/vhost/rte_eth_vhost.c | 76 +++++++++++++++----------------
>  1 file changed, 38 insertions(+), 38 deletions(-)

Applied to dpdk-next-virtio/main.

Thanks,
Maxime


