* [dpdk-dev] [PATCH] vhost: add pmd xstats
@ 2016-08-19 12:16 Zhiyong Yang
2016-08-22 7:52 ` Panu Matilainen
2016-09-09 8:15 ` [dpdk-dev] [PATCH v2] net/vhost: " Zhiyong Yang
0 siblings, 2 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-08-19 12:16 UTC (permalink / raw)
To: dev; +Cc: Zhiyong Yang
This feature adds vhost PMD extended statistics on a per-queue basis
for applications such as OVS.
The statistics counters are based on RFC 2819 and 2863 as follows:
rx/tx_good_packets
rx/tx_total_bytes
rx/tx_dropped_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_ucast_packets
rx/tx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets
rx/tx_size_128_to_255_packets
rx/tx_size_256_to_511_packets
rx/tx_size_512_to_1023_packets
rx/tx_size_1024_to_1522_packets
rx/tx_size_1523_to_max_packets
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets
No API is changed or added. Applications use the existing calls:
rte_eth_xstats_get_names() to retrieve the names of the supported vhost
xstats,
rte_eth_xstats_get() to retrieve the vhost extended statistics,
rte_eth_xstats_reset() to reset the vhost extended statistics.
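As an illustration (not part of this patch), an application could read
and reset these counters via the existing ethdev calls roughly as
follows, assuming <rte_ethdev.h>, <stdio.h>, <stdlib.h> and
<inttypes.h> are included and port_id refers to a started vhost port:

	int i, n;
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *xstats;

	n = rte_eth_xstats_get_names(port_id, NULL, 0); /* number of counters */
	names = malloc(n * sizeof(*names));
	xstats = malloc(n * sizeof(*xstats));
	rte_eth_xstats_get_names(port_id, names, n);    /* counter names */
	rte_eth_xstats_get(port_id, xstats, n);         /* counter values */
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n",
		       names[xstats[i].id].name, xstats[i].value);
	rte_eth_xstats_reset(port_id);                  /* clear all xstats */
	free(names);
	free(xstats);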
Since collecting data in vhost_update_packet_xstats has some impact on
RX/TX performance, the compile-time switch
CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS is set to 'n' by default in
config/common_base; set it to 'y' if xstats data is needed.
The usage of vhost PMD xstats is the same as virtio PMD xstats.
For example, when the test-pmd application is running in interactive
mode, vhost PMD xstats support the two following commands:
show port xstats all|port_id shows vhost xstats
clear port xstats all|port_id resets vhost xstats
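For example, an interactive test-pmd session could look like this
(counter values are illustrative only):

	testpmd> show port xstats 0
	rx_q0_good_packets: 12345
	rx_q0_total_bytes: 790080
	...
	testpmd> clear port xstats 0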
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
config/common_base | 1 +
drivers/net/vhost/rte_eth_vhost.c | 295 +++++++++++++++++++++++++++++++++++++-
2 files changed, 295 insertions(+), 1 deletion(-)
diff --git a/config/common_base b/config/common_base
index 7830535..57fcb3f 100644
--- a/config/common_base
+++ b/config/common_base
@@ -561,6 +561,7 @@ CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
# To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled.
#
CONFIG_RTE_LIBRTE_PMD_VHOST=n
+CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n
#
#Compile Xen domain0 support
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7539cd4..20b77ca 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -45,6 +45,9 @@
#include <rte_kvargs.h>
#include <rte_virtio_net.h>
#include <rte_spinlock.h>
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+#include <rte_common.h>
+#endif
#include "rte_eth_vhost.h"
@@ -72,6 +75,12 @@ static struct ether_addr base_eth_addr = {
}
};
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
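+/*
+ * Index layout of vhost_xstats.stat[] (see the *_stat_strings tables
+ * below): [0] undersize, [1] 64B, [2] 65-127B, [3] 128-255B,
+ * [4] 256-511B, [5] 512-1023B, [6] 1024-1522B, [7] 1523B-max,
+ * [8] broadcast, [9] multicast, [10] unicast, [11] errors,
+ * [12] fragmented errors, [13] jabber errors, [14] unknown protos.
+ */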
+struct vhost_xstats {
+ uint64_t stat[16];
+};
+#endif
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -85,7 +94,10 @@ struct vhost_queue {
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
-};
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+ struct vhost_xstats xstats;
+#endif
+};
struct pmd_internal {
char *dev_name;
@@ -127,6 +139,274 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+enum rte_vhostqueue_rxtx {
+ RTE_VHOSTQUEUE_RX = 0,
+ RTE_VHOSTQUEUE_TX = 1
+};
+
+#define RTE_ETH_VHOST_XSTATS_NAME_SIZE 64
+
+struct rte_vhost_xstats_name_off {
+ char name[RTE_ETH_VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* rx_qX_ is prepended to the name string here */
+static const struct rte_vhost_xstats_name_off rte_vhost_rxq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, rx_pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, rx_bytes)},
+ {"dropped_pkts",
+ offsetof(struct vhost_queue, missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, xstats.stat[8])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, xstats.stat[9])},
+ {"ucast_packets",
+ offsetof(struct vhost_queue, xstats.stat[10])},
+ {"undersize_errors",
+ offsetof(struct vhost_queue, xstats.stat[0])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, xstats.stat[1])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, xstats.stat[2])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, xstats.stat[3])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, xstats.stat[4])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, xstats.stat[5])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, xstats.stat[6])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, xstats.stat[7])},
+ {"errors",
+ offsetof(struct vhost_queue, xstats.stat[11])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, xstats.stat[12])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, xstats.stat[13])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, xstats.stat[14])},
+};
+
+/* tx_qX_ is prepended to the name string here */
+static const struct rte_vhost_xstats_name_off rte_vhost_txq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, tx_pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, tx_bytes)},
+ {"dropped_pkts",
+ offsetof(struct vhost_queue, missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, xstats.stat[8])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, xstats.stat[9])},
+ {"ucast_packets",
+ offsetof(struct vhost_queue, xstats.stat[10])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, xstats.stat[1])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, xstats.stat[2])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, xstats.stat[3])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, xstats.stat[4])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, xstats.stat[5])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, xstats.stat[6])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, xstats.stat[7])},
+ {"errors",
+ offsetof(struct vhost_queue, xstats.stat[11])},
+};
+
+#define VHOST_NB_RXQ_XSTATS (sizeof(rte_vhost_rxq_stat_strings) / \
+ sizeof(rte_vhost_rxq_stat_strings[0]))
+
+#define VHOST_NB_TXQ_XSTATS (sizeof(rte_vhost_txq_stat_strings) / \
+ sizeof(rte_vhost_txq_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vqrx = NULL;
+ struct vhost_queue *vqtx = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+ vqrx = (struct vhost_queue *)dev->data->rx_queues[i];
+ vqrx->rx_pkts = 0;
+ vqrx->rx_bytes = 0;
+ memset(&vqrx->xstats, 0, sizeof(vqrx->xstats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+ vqtx = (struct vhost_queue *)dev->data->tx_queues[i];
+ vqtx->tx_pkts = 0;
+ vqtx->tx_bytes = 0;
+ vqtx->missed_pkts = 0;
+ memset(&vqtx->xstats, 0, sizeof(vqtx->xstats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ unsigned int i = 0;
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (xstats_names) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vhost_queue *rxvq = dev->data->rx_queues[i];
+
+ if (!rxvq)
+ continue;
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ rte_vhost_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vhost_queue *txvq = dev->data->tx_queues[i];
+
+ if (!txvq)
+ continue;
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ rte_vhost_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+ return count;
+ }
+ return nstats;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+
+ unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vhost_queue *rxvq =
+ (struct vhost_queue *)dev->data->rx_queues[i];
+
+ if (!rxvq)
+ continue;
+
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ xstats[count].value = *(uint64_t *)(((char *)rxvq)
+ + rte_vhost_rxq_stat_strings[t].offset);
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vhost_queue *txvq =
+ (struct vhost_queue *)dev->data->tx_queues[i];
+
+ if (!txvq)
+ continue;
+
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ xstats[count].value = *(uint64_t *)(((char *)txvq)
+ + rte_vhost_txq_stat_strings[t].offset);
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t nb_rxtx,
+ uint16_t nb_bufs,
+ enum rte_vhostqueue_rxtx vqueue_rxtx)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct ether_addr *ea = NULL;
+ struct vhost_xstats *xstats_update = &vq->xstats;
+
+ for (i = 0; i < nb_rxtx ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ xstats_update->stat[1]++;
+
+ } else if (pkt_len > 64 && pkt_len < 1024) {
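+ /* index = floor(log2(pkt_len)) - 4, so 65-127 maps to
+ * stat[2], ..., 512-1023 maps to stat[5]
+ */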
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ xstats_update->stat[index]++;
+ } else {
+ if (pkt_len < 64)
+ xstats_update->stat[0]++;
+ else if (pkt_len <= 1522)
+ xstats_update->stat[6]++;
+ else if (pkt_len > 1522)
+ xstats_update->stat[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ /* broadcast++; */
+ xstats_update->stat[8]++;
+ else
+ /* multicast++; */
+ xstats_update->stat[9]++;
+ }
+ }
+ /* non-multi/broadcast, multi/broadcast, including those
+ * that were discarded or not sent. from rfc2863
+ */
+ if (vqueue_rxtx == RTE_VHOSTQUEUE_RX) {
+ xstats_update->stat[10] = vq->rx_pkts + vq->missed_pkts
+ - (xstats_update->stat[8]
+ + xstats_update->stat[9]);
+ } else {
+ for (i = nb_rxtx; i < nb_bufs ; i++) {
+ /* classify the unsent (dropped) packets as well;
+ * reload the Ethernet header for each packet instead
+ * of reusing the stale 'ea' from the loop above
+ */
+ ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ xstats_update->stat[8]++;
+ else
+ xstats_update->stat[9]++;
+ }
+ }
+ xstats_update->stat[10] = vq->tx_pkts + vq->missed_pkts
+ - (xstats_update->stat[8] + xstats_update->stat[9]);
+ }
+}
+#endif
+
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -152,6 +432,10 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
r->rx_bytes += bufs[i]->pkt_len;
}
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+ vhost_update_packet_xstats(r, bufs, nb_rx, nb_rx, RTE_VHOSTQUEUE_RX);
+#endif
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -182,6 +466,10 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_tx); i++)
r->tx_bytes += bufs[i]->pkt_len;
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+ vhost_update_packet_xstats(r, bufs, nb_tx, nb_bufs, RTE_VHOSTQUEUE_TX);
+#endif
+
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
out:
@@ -682,6 +970,11 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
+#endif
};
static int
--
2.5.5
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-19 12:16 [dpdk-dev] [PATCH] vhost: add pmd xstats Zhiyong Yang
@ 2016-08-22 7:52 ` Panu Matilainen
2016-08-23 8:04 ` Yang, Zhiyong
2016-09-09 8:15 ` [dpdk-dev] [PATCH v2] net/vhost: " Zhiyong Yang
1 sibling, 1 reply; 57+ messages in thread
From: Panu Matilainen @ 2016-08-22 7:52 UTC (permalink / raw)
To: Zhiyong Yang, dev
On 08/19/2016 03:16 PM, Zhiyong Yang wrote:
> This feature adds vhost pmd extended statistics from per queue perspective
> for the application such as OVS etc.
>
> The statistics counters are based on RFC 2819 and 2863 as follows:
>
> rx/tx_good_packets
> rx/tx_total_bytes
> rx/tx_dropped_pkts
> rx/tx_broadcast_packets
> rx/tx_multicast_packets
> rx/tx_ucast_packets
> rx/tx_undersize_errors
> rx/tx_size_64_packets
> rx/tx_size_65_to_127_packets;
> rx/tx_size_128_to_255_packets;
> rx/tx_size_256_to_511_packets;
> rx/tx_size_512_to_1023_packets;
> rx/tx_size_1024_to_1522_packets;
> rx/tx_1523_to_max_packets;
> rx/tx_errors
> rx_fragmented_errors
> rx_jabber_errors
> rx_unknown_protos_packets;
>
> No API is changed or added.
> rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
> supported,
> rte_eth_xstats_get() to retrieve vhost extended statistics,
> rte_eth_xstats_reset() to reset vhost extended statistics.
>
> Since collecting data of vhost_update_packet_xstats will have some effect
> on RX/TX performance, so, Setting compiling switch
> CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default in the file
> config/common_base, if needing xstats data, you can enable it(y).
NAK, such things need to be switchable at run-time.
- Panu -
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-22 7:52 ` Panu Matilainen
@ 2016-08-23 8:04 ` Yang, Zhiyong
2016-08-23 9:45 ` Panu Matilainen
0 siblings, 1 reply; 57+ messages in thread
From: Yang, Zhiyong @ 2016-08-23 8:04 UTC (permalink / raw)
To: Panu Matilainen, dev; +Cc: Yang, Zhiyong
Hi, Panu:
> -----Original Message-----
> From: Panu Matilainen [mailto:pmatilai@redhat.com]
> Sent: Monday, August 22, 2016 3:53 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> On 08/19/2016 03:16 PM, Zhiyong Yang wrote:
> > This feature adds vhost pmd extended statistics from per queue
> > perspective for the application such as OVS etc.
> >
> > The statistics counters are based on RFC 2819 and 2863 as follows:
> >
> > rx/tx_good_packets
> > rx/tx_total_bytes
> > rx/tx_dropped_pkts
> > rx/tx_broadcast_packets
> > rx/tx_multicast_packets
> > rx/tx_ucast_packets
> > rx/tx_undersize_errors
> > rx/tx_size_64_packets
> > rx/tx_size_65_to_127_packets;
> > rx/tx_size_128_to_255_packets;
> > rx/tx_size_256_to_511_packets;
> > rx/tx_size_512_to_1023_packets;
> > rx/tx_size_1024_to_1522_packets;
> > rx/tx_1523_to_max_packets;
> > rx/tx_errors
> > rx_fragmented_errors
> > rx_jabber_errors
> > rx_unknown_protos_packets;
> >
> > No API is changed or added.
> > rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
> > supported,
> > rte_eth_xstats_get() to retrieve vhost extended statistics,
> > rte_eth_xstats_reset() to reset vhost extended statistics.
> >
> > Since collecting data of vhost_update_packet_xstats will have some
> > effect on RX/TX performance, so, Setting compiling switch
> > CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default in the
> file
> > config/common_base, if needing xstats data, you can enable it(y).
>
> NAK, such things need to be switchable at run-time.
>
> - Panu -
I chose the compile-time switch rather than a run-time command-line
option for the following reasons:
1. Similar xstats update functions always collect stats data in the
background while RX/TX is running, for example in the physical NIC or
virtio drivers, which have no switch. The compile-time switch for vhost
PMD xstats is added as an option for when performance is a critical
factor.
2. No data structure or API in any layer supports an xstats update
switch at run-time. The common data structure (struct rte_eth_dev_data)
has no device-specific data member, so implementing enable/disable of
vhost_update_packet_xstats at run-time would require defining a
(device-specific) flag in it, because struct vhost_queue in the driver
code (the eth_vhost_rx/eth_vhost_tx processing) is not visible from the
device perspective.
3. I tested RX/TX with the v1 patch enabled (y) as a reference, on an
Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz, with 64-byte packets in burst
mode and 32 packets per RX/TX call. The overhead of
vhost_update_packet_xstats is less than 3% of the RX/TX processing, so
it appears to have a limited effect on performance.
-zhiyong-
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-23 8:04 ` Yang, Zhiyong
@ 2016-08-23 9:45 ` Panu Matilainen
2016-08-24 5:46 ` Yuanhan Liu
0 siblings, 1 reply; 57+ messages in thread
From: Panu Matilainen @ 2016-08-23 9:45 UTC (permalink / raw)
To: Yang, Zhiyong, dev
On 08/23/2016 11:04 AM, Yang, Zhiyong wrote:
> Hi, Panu:
>
>> -----Original Message-----
>> From: Panu Matilainen [mailto:pmatilai@redhat.com]
>> Sent: Monday, August 22, 2016 3:53 PM
>> To: Yang, Zhiyong <zhiyong.yang@intel.com>; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
>>
>> On 08/19/2016 03:16 PM, Zhiyong Yang wrote:
>>> This feature adds vhost pmd extended statistics from per queue
>>> perspective for the application such as OVS etc.
>>>
>>> The statistics counters are based on RFC 2819 and 2863 as follows:
>>>
>>> rx/tx_good_packets
>>> rx/tx_total_bytes
>>> rx/tx_dropped_pkts
>>> rx/tx_broadcast_packets
>>> rx/tx_multicast_packets
>>> rx/tx_ucast_packets
>>> rx/tx_undersize_errors
>>> rx/tx_size_64_packets
>>> rx/tx_size_65_to_127_packets;
>>> rx/tx_size_128_to_255_packets;
>>> rx/tx_size_256_to_511_packets;
>>> rx/tx_size_512_to_1023_packets;
>>> rx/tx_size_1024_to_1522_packets;
>>> rx/tx_1523_to_max_packets;
>>> rx/tx_errors
>>> rx_fragmented_errors
>>> rx_jabber_errors
>>> rx_unknown_protos_packets;
>>>
>>> No API is changed or added.
>>> rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
>>> supported,
>>> rte_eth_xstats_get() to retrieve vhost extended statistics,
>>> rte_eth_xstats_reset() to reset vhost extended statistics.
>>>
>>> Since collecting data of vhost_update_packet_xstats will have some
>>> effect on RX/TX performance, so, Setting compiling switch
>>> CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default in the
>> file
>>> config/common_base, if needing xstats data, you can enable it(y).
>>
>> NAK, such things need to be switchable at run-time.
>>
>> - Panu -
>
> Considering the following reasons using the compiler switch, not
> command-line at run-time.
>
> 1.Similar xstats update functions are always collecting stats data in the
> background when rx/tx are running, such as the physical NIC or virtio,
> which have no switch. Compiler switch for vhost pmd xstats is added
> as a option when performance is viewed as critical factor.
>
> 2. No data structure and API in any layer support the xstats update switch
> at run-time. Common data structure (struct rte_eth_dev_data) has no
> device-specific data member, if implementing enable/disable of vhost_update
> _packet_xstats at run-time, must define a flag(device-specific) in it,
> because the definition of struct vhost_queue in the driver code
> (eth_vhost_rx/eth_vhost_tx processing)is not visible from device perspective.
>
> 3. I tested RX/TX with v1 patch (y) as reference based on Intel(R)
> Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst mode, 32 packets
> in one RX/TX processing. Overhead of vhost_update_packet_xstats is less than
> 3% for the rx/tx processing. It looks that vhost_update_packet_xstats has a
> limited effect on performance drop.
Well, either the performance overhead is acceptable and it should always
be on (like with physical NICs I think). Or it is not. In which case it
needs to be turnable on and off, at run-time. Rebuilding is not an
option in the world of distros.
- Panu -
>
> -zhiyong-
>
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-23 9:45 ` Panu Matilainen
@ 2016-08-24 5:46 ` Yuanhan Liu
2016-08-24 8:44 ` Thomas Monjalon
0 siblings, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-08-24 5:46 UTC (permalink / raw)
To: Panu Matilainen; +Cc: Yang, Zhiyong, dev
On Tue, Aug 23, 2016 at 12:45:54PM +0300, Panu Matilainen wrote:
> >>>Since collecting data of vhost_update_packet_xstats will have some
> >>>effect on RX/TX performance, so, Setting compiling switch
> >>>CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default in the
> >>file
> >>>config/common_base, if needing xstats data, you can enable it(y).
> >>
> >>NAK, such things need to be switchable at run-time.
> >>
> >> - Panu -
> >
> >Considering the following reasons using the compiler switch, not
> >command-line at run-time.
> >
> >1.Similar xstats update functions are always collecting stats data in the
> >background when rx/tx are running, such as the physical NIC or virtio,
> >which have no switch. Compiler switch for vhost pmd xstats is added
> >as a option when performance is viewed as critical factor.
> >
> >2. No data structure and API in any layer support the xstats update switch
> >at run-time. Common data structure (struct rte_eth_dev_data) has no
> >device-specific data member, if implementing enable/disable of vhost_update
> >_packet_xstats at run-time, must define a flag(device-specific) in it,
> >because the definition of struct vhost_queue in the driver code
> >(eth_vhost_rx/eth_vhost_tx processing)is not visible from device perspective.
> >
> >3. I tested RX/TX with v1 patch (y) as reference based on Intel(R)
> >Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst mode, 32 packets
> >in one RX/TX processing. Overhead of vhost_update_packet_xstats is less than
> >3% for the rx/tx processing. It looks that vhost_update_packet_xstats has a
> >limited effect on performance drop.
>
> Well, either the performance overhead is acceptable and it should always be
> on (like with physical NICs I think). Or it is not. In which case it needs
> to be turnable on and off, at run-time. Rebuilding is not an option in the
> world of distros.
I think the less than 3% overhead is acceptable here, so I agree with
Panu that we should always keep it on. If someone complains later that
even 3% is too big for them, let's consider making it switchable at
run-time. We could either introduce a generic eth API for that, or
just a vhost-specific one if a generic API doesn't make sense for other
eth drivers.
--yliu
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-24 5:46 ` Yuanhan Liu
@ 2016-08-24 8:44 ` Thomas Monjalon
2016-08-24 12:37 ` Panu Matilainen
0 siblings, 1 reply; 57+ messages in thread
From: Thomas Monjalon @ 2016-08-24 8:44 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, Panu Matilainen, Yang, Zhiyong
2016-08-24 13:46, Yuanhan Liu:
> On Tue, Aug 23, 2016 at 12:45:54PM +0300, Panu Matilainen wrote:
> > >>>Since collecting data of vhost_update_packet_xstats will have some
> > >>>effect on RX/TX performance, so, Setting compiling switch
> > >>>CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default in the
> > >>file
> > >>>config/common_base, if needing xstats data, you can enable it(y).
> > >>
> > >>NAK, such things need to be switchable at run-time.
> > >>
> > >> - Panu -
> > >
> > >Considering the following reasons using the compiler switch, not
> > >command-line at run-time.
> > >
> > >1.Similar xstats update functions are always collecting stats data in the
> > >background when rx/tx are running, such as the physical NIC or virtio,
> > >which have no switch. Compiler switch for vhost pmd xstats is added
> > >as a option when performance is viewed as critical factor.
> > >
> > >2. No data structure and API in any layer support the xstats update switch
> > >at run-time. Common data structure (struct rte_eth_dev_data) has no
> > >device-specific data member, if implementing enable/disable of vhost_update
> > >_packet_xstats at run-time, must define a flag(device-specific) in it,
> > >because the definition of struct vhost_queue in the driver code
> > >(eth_vhost_rx/eth_vhost_tx processing)is not visible from device perspective.
> > >
> > >3. I tested RX/TX with v1 patch (y) as reference based on Intel(R)
> > >Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst mode, 32 packets
> > >in one RX/TX processing. Overhead of vhost_update_packet_xstats is less than
> > >3% for the rx/tx processing. It looks that vhost_update_packet_xstats has a
> > >limited effect on performance drop.
> >
> > Well, either the performance overhead is acceptable and it should always be
> > on (like with physical NICs I think). Or it is not. In which case it needs
> > to be turnable on and off, at run-time. Rebuilding is not an option in the
> > world of distros.
>
> I think the less than 3% overhead is acceptable here, that I agree with
> Panu we should always keep it on. If someone compains it later that even
> 3% is too big for them, let's consider to make it be switchable at
> run-time. Either we could introduce a generic eth API for that, Or
> just introduce a vhost one if that doesn't make too much sense to other
> eth drivers.
+1
It may make sense to introduce a generic run-time option for stats.
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-24 8:44 ` Thomas Monjalon
@ 2016-08-24 12:37 ` Panu Matilainen
2016-08-25 9:21 ` Yang, Zhiyong
0 siblings, 1 reply; 57+ messages in thread
From: Panu Matilainen @ 2016-08-24 12:37 UTC (permalink / raw)
To: Thomas Monjalon, Yuanhan Liu; +Cc: dev, Yang, Zhiyong
On 08/24/2016 11:44 AM, Thomas Monjalon wrote:
> 2016-08-24 13:46, Yuanhan Liu:
>> On Tue, Aug 23, 2016 at 12:45:54PM +0300, Panu Matilainen wrote:
>>>>>> Since collecting data of vhost_update_packet_xstats will have some
>>>>>> effect on RX/TX performance, so, Setting compiling switch
>>>>>> CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default in the
>>>>> file
>>>>>> config/common_base, if needing xstats data, you can enable it(y).
>>>>>
>>>>> NAK, such things need to be switchable at run-time.
>>>>>
>>>>> - Panu -
>>>>
>>>> Considering the following reasons using the compiler switch, not
>>>> command-line at run-time.
>>>>
>>>> 1.Similar xstats update functions are always collecting stats data in the
>>>> background when rx/tx are running, such as the physical NIC or virtio,
>>>> which have no switch. Compiler switch for vhost pmd xstats is added
>>>> as a option when performance is viewed as critical factor.
>>>>
>>>> 2. No data structure and API in any layer support the xstats update switch
>>>> at run-time. Common data structure (struct rte_eth_dev_data) has no
>>>> device-specific data member, if implementing enable/disable of vhost_update
>>>> _packet_xstats at run-time, must define a flag(device-specific) in it,
>>>> because the definition of struct vhost_queue in the driver code
>>>> (eth_vhost_rx/eth_vhost_tx processing)is not visible from device perspective.
>>>>
>>>> 3. I tested RX/TX with v1 patch (y) as reference based on Intel(R)
>>>> Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst mode, 32 packets
>>>> in one RX/TX processing. Overhead of vhost_update_packet_xstats is less than
>>>> 3% for the rx/tx processing. It looks that vhost_update_packet_xstats has a
>>>> limited effect on performance drop.
>>>
>>> Well, either the performance overhead is acceptable and it should always be
>>> on (like with physical NICs I think). Or it is not. In which case it needs
>>> to be turnable on and off, at run-time. Rebuilding is not an option in the
>>> world of distros.
>>
>> I think the less than 3% overhead is acceptable here, that I agree with
>> Panu we should always keep it on. If someone compains it later that even
>> 3% is too big for them, let's consider to make it be switchable at
>> run-time. Either we could introduce a generic eth API for that, Or
>> just introduce a vhost one if that doesn't make too much sense to other
>> eth drivers.
>
> +1
> It may have sense to introduce a generic run-time option for stats.
>
Yup, sounds good.
- Panu -
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-24 12:37 ` Panu Matilainen
@ 2016-08-25 9:21 ` Yang, Zhiyong
2016-08-30 2:45 ` Yao, Lei A
0 siblings, 1 reply; 57+ messages in thread
From: Yang, Zhiyong @ 2016-08-25 9:21 UTC (permalink / raw)
To: Panu Matilainen, Thomas Monjalon, Yuanhan Liu; +Cc: dev
> -----Original Message-----
> From: Panu Matilainen [mailto:pmatilai@redhat.com]
> Sent: Wednesday, August 24, 2016 8:37 PM
> To: Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu
> <yuanhan.liu@linux.intel.com>
> Cc: dev@dpdk.org; Yang, Zhiyong <zhiyong.yang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> On 08/24/2016 11:44 AM, Thomas Monjalon wrote:
> > 2016-08-24 13:46, Yuanhan Liu:
> >> On Tue, Aug 23, 2016 at 12:45:54PM +0300, Panu Matilainen wrote:
> >>>>>> Since collecting data of vhost_update_packet_xstats will have
> >>>>>> some effect on RX/TX performance, so, Setting compiling switch
> >>>>>> CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default
> in the
> >>>>> file
> >>>>>> config/common_base, if needing xstats data, you can enable it(y).
> >>>>>
> >>>>> NAK, such things need to be switchable at run-time.
> >>>>>
> >>>>> - Panu -
> >>>>
> >>>> Considering the following reasons using the compiler switch, not
> >>>> command-line at run-time.
> >>>>
> >>>> 1.Similar xstats update functions are always collecting stats data
> >>>> in the background when rx/tx are running, such as the physical NIC
> >>>> or virtio, which have no switch. Compiler switch for vhost pmd
> >>>> xstats is added as a option when performance is viewed as critical
> factor.
> >>>>
> >>>> 2. No data structure and API in any layer support the xstats update
> >>>> switch at run-time. Common data structure (struct rte_eth_dev_data)
> >>>> has no device-specific data member, if implementing enable/disable
> >>>> of vhost_update _packet_xstats at run-time, must define a
> >>>> flag(device-specific) in it, because the definition of struct
> >>>> vhost_queue in the driver code (eth_vhost_rx/eth_vhost_tx
> processing)is not visible from device perspective.
> >>>>
> >>>> 3. I tested RX/TX with v1 patch (y) as reference based on Intel(R)
> >>>> Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst mode,
> >>>> 32 packets in one RX/TX processing. Overhead of
> >>>> vhost_update_packet_xstats is less than 3% for the rx/tx
> >>>> processing. It looks that vhost_update_packet_xstats has a limited
> effect on performance drop.
> >>>
> >>> Well, either the performance overhead is acceptable and it should
> >>> always be on (like with physical NICs I think). Or it is not. In
> >>> which case it needs to be turnable on and off, at run-time.
> >>> Rebuilding is not an option in the world of distros.
> >>
> >> I think the less than 3% overhead is acceptable here, that I agree
> >> with Panu we should always keep it on. If someone compains it later
> >> that even 3% is too big for them, let's consider to make it be
> >> switchable at run-time. Either we could introduce a generic eth API
> >> for that, Or just introduce a vhost one if that doesn't make too much
> >> sense to other eth drivers.
> >
> > +1
> > It may have sense to introduce a generic run-time option for stats.
> >
>
> Yup, sounds good.
>
It sounds better if DPDK can add a generic API and structure for
switching xstats updates on and off, so that any device can use it at
run time if necessary.
Can we define a one-bit data member (xstats_update) in the data
structure struct rte_eth_dev_data?
For example:
uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */
lro : 1, /**< RX LRO is ON(1) / OFF(0) */
xstats_update: 1; /**< xstats update is ON(1) / OFF(0) */
Define 3 functions:
void rte_eth_xstats_update_enable(uint8_t port_id);
void rte_eth_xstats_update_disable(uint8_t port_id);
int rte_eth_xstats_update_get(uint8_t port_id);
Or define two:
/* uint8_t xstats_update ; 1 on, 0 off*/
void rte_eth_xstats_update_enable(uint8_t port_id, uint8_t xstats_update);
int rte_eth_xstats_update_get(uint8_t port_id);
In struct eth_dev_ops, add two functions to pass xstats_update to the
driver, because the rxq/txq cannot access xstats_update directly.
So, add an xstats flag to the per-queue data structure, for example:
struct vhost_queue {
......
uint64_t xstats_flag;
......
};
typedef uint16_t (*eth_rx_burst_t)(void *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
typedef uint16_t (*eth_tx_burst_t)(void *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
struct eth_dev_ops {
......
eth_xstats_update_enable_t xstats_update_enable; /**< xstats ON. */
eth_xstats_update_disable_t xstats_update_disable;/**< xstats OFF. */
......
};
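A minimal usage sketch, assuming the proposed (not yet existing) calls
above were added:

/* hypothetical application code using the proposed run-time switch */
rte_eth_xstats_update_enable(port_id);
/* ... run traffic, read counters via rte_eth_xstats_get() ... */
if (rte_eth_xstats_update_get(port_id))
	rte_eth_xstats_update_disable(port_id);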
--zhiyong--
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-25 9:21 ` Yang, Zhiyong
@ 2016-08-30 2:45 ` Yao, Lei A
2016-08-30 3:03 ` Xu, Qian Q
0 siblings, 1 reply; 57+ messages in thread
From: Yao, Lei A @ 2016-08-30 2:45 UTC (permalink / raw)
To: Yang, Zhiyong, Panu Matilainen, Thomas Monjalon, Yuanhan Liu; +Cc: dev
Hi, Zhiyong
I have collected more xstats performance drop data at my side:
Vhost Xstats patch with mergeable on : ~3%
Vhost Xstats patch with mergeable off : ~9%
Zhihong also submitted patches to improve the performance of the
mergeable-on path: http://dpdk.org/dev/patchwork/patch/15245/ ~15249. If both patches are integrated, the performance drop will be much higher:
Vhost Xstats patch + vhost mergeable-on patches, with mergeable on : the performance drop is around 6%
Best Regards
Lei
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yang, Zhiyong
Sent: Thursday, August 25, 2016 5:22 PM
To: Panu Matilainen <pmatilai@redhat.com>; Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
> -----Original Message-----
> From: Panu Matilainen [mailto:pmatilai@redhat.com]
> Sent: Wednesday, August 24, 2016 8:37 PM
> To: Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu
> <yuanhan.liu@linux.intel.com>
> Cc: dev@dpdk.org; Yang, Zhiyong <zhiyong.yang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> On 08/24/2016 11:44 AM, Thomas Monjalon wrote:
> > 2016-08-24 13:46, Yuanhan Liu:
> >> On Tue, Aug 23, 2016 at 12:45:54PM +0300, Panu Matilainen wrote:
> >>>>>> Since collecting data of vhost_update_packet_xstats will have
> >>>>>> some effect on RX/TX performance, so, Setting compiling switch
> >>>>>> CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default
> in the
> >>>>> file
> >>>>>> config/common_base, if needing xstats data, you can enable it(y).
> >>>>>
> >>>>> NAK, such things need to be switchable at run-time.
> >>>>>
> >>>>> - Panu -
> >>>>
> >>>> Considering the following reasons using the compiler switch, not
> >>>> command-line at run-time.
> >>>>
> >>>> 1.Similar xstats update functions are always collecting stats
> >>>> data in the background when rx/tx are running, such as the
> >>>> physical NIC or virtio, which have no switch. Compiler switch for
> >>>> vhost pmd xstats is added as a option when performance is viewed
> >>>> as critical
> factor.
> >>>>
> >>>> 2. No data structure and API in any layer support the xstats
> >>>> update switch at run-time. Common data structure (struct
> >>>> rte_eth_dev_data) has no device-specific data member, if
> >>>> implementing enable/disable of vhost_update _packet_xstats at
> >>>> run-time, must define a
> >>>> flag(device-specific) in it, because the definition of struct
> >>>> vhost_queue in the driver code (eth_vhost_rx/eth_vhost_tx
> processing)is not visible from device perspective.
> >>>>
> >>>> 3. I tested RX/TX with v1 patch (y) as reference based on
> >>>> Intel(R)
> >>>> Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst
> >>>> mode,
> >>>> 32 packets in one RX/TX processing. Overhead of
> >>>> vhost_update_packet_xstats is less than 3% for the rx/tx
> >>>> processing. It looks that vhost_update_packet_xstats has a
> >>>> limited
> effect on performance drop.
> >>>
> >>> Well, either the performance overhead is acceptable and it should
> >>> always be on (like with physical NICs I think). Or it is not. In
> >>> which case it needs to be turnable on and off, at run-time.
> >>> Rebuilding is not an option in the world of distros.
> >>
> >> I think the less than 3% overhead is acceptable here, that I agree
> >> with Panu we should always keep it on. If someone compains it later
> >> that even 3% is too big for them, let's consider to make it be
> >> switchable at run-time. Either we could introduce a generic eth API
> >> for that, Or just introduce a vhost one if that doesn't make too
> >> much sense to other eth drivers.
> >
> > +1
> > It may have sense to introduce a generic run-time option for stats.
> >
>
> Yup, sounds good.
>
It sounds better , if DPDK can add generic API and structure to the switch of xstats update. So, any device can use it at run time if necessary.
Can we define one bit data member (xstats_update) in the data structure struct rte_eth_dev_data?
such as:
uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */
lro : 1, /**< RX LRO is ON(1) / OFF(0) */
xstats_update: 1; /**< xstats update is ON(1) / OFF(0) */
Define 3 functions:
void rte_eth_xstats_update_enable(uint8_t port_id); void rte_eth_xstats_update_disable(uint8_t port_id); int rte_eth_xstats_update_get(uint8_t port_id);
Or define two:
/* uint8_t xstats_update ; 1 on, 0 off*/ void rte_eth_xstats_update_enable(uint8_t port_id, uint8_t xstats_update); int rte_eth_xstats_update_get(uint8_t port_id);
In the struct eth_dev_ops, adding two functions to pass xstats_update to driver, because the rxq/txq can't access xstats_update directly.
So, add a xstats flag per queue data structure.
for example
struct vhost_queue {
......
Uint64_t xstats_flag;
......
};
typedef uint16_t (*eth_rx_burst_t)(void *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
typedef uint16_t (*eth_tx_burst_t)(void *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
struct eth_dev_ops {
......
eth_xstats_update_enable_t xstats_update_enable; /**< xstats ON. */
eth_xstats_update_disable_t xstats_update_disable;/**< xstats OFF. */ ......
};
--zhiyong--
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-30 2:45 ` Yao, Lei A
@ 2016-08-30 3:03 ` Xu, Qian Q
2016-08-30 3:21 ` Yao, Lei A
0 siblings, 1 reply; 57+ messages in thread
From: Xu, Qian Q @ 2016-08-30 3:03 UTC (permalink / raw)
To: Yao, Lei A, Yang, Zhiyong, Panu Matilainen, Thomas Monjalon, Yuanhan Liu
Cc: dev
Lei
Could you list the test setup for the findings below? I think we need to check at least the following tests for the mergeable=on/off paths:
1. Vhost/virtio loopback
2. PVP test : virtio-pmd IO fwd and virtio-net IPV4 fwd
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yao, Lei A
Sent: Tuesday, August 30, 2016 10:46 AM
To: Yang, Zhiyong <zhiyong.yang@intel.com>; Panu Matilainen <pmatilai@redhat.com>; Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
Hi, Zhiyong
I have tested more xstats performance drop data at my side.
Vhost Xstats patch with mergeable on : ~3% Vhost Xstats patch with mergeable off : ~9%
Because Zhihong also submit patch to improve the performance on for the mergeable on: http://dpdk.org/dev/patchwork/patch/15245/ ~15249. If both patch integrated, the performance drop will be much higher
Vhsot Xstats patch + Vhost mergeable on patch with mergeable on : the performance drop is around 6%
Best Regards
Lei
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yang, Zhiyong
Sent: Thursday, August 25, 2016 5:22 PM
To: Panu Matilainen <pmatilai@redhat.com>; Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
> -----Original Message-----
> From: Panu Matilainen [mailto:pmatilai@redhat.com]
> Sent: Wednesday, August 24, 2016 8:37 PM
> To: Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu
> <yuanhan.liu@linux.intel.com>
> Cc: dev@dpdk.org; Yang, Zhiyong <zhiyong.yang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> On 08/24/2016 11:44 AM, Thomas Monjalon wrote:
> > 2016-08-24 13:46, Yuanhan Liu:
> >> On Tue, Aug 23, 2016 at 12:45:54PM +0300, Panu Matilainen wrote:
> >>>>>> Since collecting data of vhost_update_packet_xstats will have
> >>>>>> some effect on RX/TX performance, so, Setting compiling switch
> >>>>>> CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default
> in the
> >>>>> file
> >>>>>> config/common_base, if needing xstats data, you can enable it(y).
> >>>>>
> >>>>> NAK, such things need to be switchable at run-time.
> >>>>>
> >>>>> - Panu -
> >>>>
> >>>> Considering the following reasons using the compiler switch, not
> >>>> command-line at run-time.
> >>>>
> >>>> 1.Similar xstats update functions are always collecting stats
> >>>> data in the background when rx/tx are running, such as the
> >>>> physical NIC or virtio, which have no switch. Compiler switch for
> >>>> vhost pmd xstats is added as a option when performance is viewed
> >>>> as critical
> factor.
> >>>>
> >>>> 2. No data structure and API in any layer support the xstats
> >>>> update switch at run-time. Common data structure (struct
> >>>> rte_eth_dev_data) has no device-specific data member, if
> >>>> implementing enable/disable of vhost_update _packet_xstats at
> >>>> run-time, must define a
> >>>> flag(device-specific) in it, because the definition of struct
> >>>> vhost_queue in the driver code (eth_vhost_rx/eth_vhost_tx
> processing)is not visible from device perspective.
> >>>>
> >>>> 3. I tested RX/TX with v1 patch (y) as reference based on
> >>>> Intel(R)
> >>>> Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst
> >>>> mode,
> >>>> 32 packets in one RX/TX processing. Overhead of
> >>>> vhost_update_packet_xstats is less than 3% for the rx/tx
> >>>> processing. It looks that vhost_update_packet_xstats has a
> >>>> limited
> effect on performance drop.
> >>>
> >>> Well, either the performance overhead is acceptable and it should
> >>> always be on (like with physical NICs I think). Or it is not. In
> >>> which case it needs to be turnable on and off, at run-time.
> >>> Rebuilding is not an option in the world of distros.
> >>
> >> I think the less than 3% overhead is acceptable here, that I agree
> >> with Panu we should always keep it on. If someone compains it later
> >> that even 3% is too big for them, let's consider to make it be
> >> switchable at run-time. Either we could introduce a generic eth API
> >> for that, Or just introduce a vhost one if that doesn't make too
> >> much sense to other eth drivers.
> >
> > +1
> > It may have sense to introduce a generic run-time option for stats.
> >
>
> Yup, sounds good.
>
It sounds better , if DPDK can add generic API and structure to the switch of xstats update. So, any device can use it at run time if necessary.
Can we define one bit data member (xstats_update) in the data structure struct rte_eth_dev_data?
such as:
uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */
lro : 1, /**< RX LRO is ON(1) / OFF(0) */
xstats_update: 1; /**< xstats update is ON(1) / OFF(0) */
Define 3 functions:
void rte_eth_xstats_update_enable(uint8_t port_id); void rte_eth_xstats_update_disable(uint8_t port_id); int rte_eth_xstats_update_get(uint8_t port_id);
Or define two:
/* uint8_t xstats_update ; 1 on, 0 off*/ void rte_eth_xstats_update_enable(uint8_t port_id, uint8_t xstats_update); int rte_eth_xstats_update_get(uint8_t port_id);
In the struct eth_dev_ops, adding two functions to pass xstats_update to driver, because the rxq/txq can't access xstats_update directly.
So, add a xstats flag per queue data structure.
for example
struct vhost_queue {
......
Uint64_t xstats_flag;
......
};
typedef uint16_t (*eth_rx_burst_t)(void *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
typedef uint16_t (*eth_tx_burst_t)(void *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
struct eth_dev_ops {
......
eth_xstats_update_enable_t xstats_update_enable; /**< xstats ON. */
eth_xstats_update_disable_t xstats_update_disable;/**< xstats OFF. */ ......
};
--zhiyong--
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-30 3:03 ` Xu, Qian Q
@ 2016-08-30 3:21 ` Yao, Lei A
2016-08-31 7:18 ` Yang, Zhiyong
0 siblings, 1 reply; 57+ messages in thread
From: Yao, Lei A @ 2016-08-30 3:21 UTC (permalink / raw)
To: Xu, Qian Q, Yang, Zhiyong, Panu Matilainen, Thomas Monjalon, Yuanhan Liu
Cc: dev
Hi, Qian
The test setup at my side is Vhost/VirtIO loopback with 64B packets.
-----Original Message-----
From: Xu, Qian Q
Sent: Tuesday, August 30, 2016 11:03 AM
To: Yao, Lei A <lei.a.yao@intel.com>; Yang, Zhiyong <zhiyong.yang@intel.com>; Panu Matilainen <pmatilai@redhat.com>; Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: dev@dpdk.org
Subject: RE: [dpdk-dev] [PATCH] vhost: add pmd xstats
Lei
Could you list the test setup for below findings? I think we need at least to check below tests for mergeable=on/off path:
1. Vhost/virtio loopback
2. PVP test : virtio-pmd IO fwd and virtio-net IPV4 fwd
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yao, Lei A
Sent: Tuesday, August 30, 2016 10:46 AM
To: Yang, Zhiyong <zhiyong.yang@intel.com>; Panu Matilainen <pmatilai@redhat.com>; Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
Hi, Zhiyong
I have tested more xstats performance drop data at my side.
Vhost Xstats patch with mergeable on : ~3% Vhost Xstats patch with mergeable off : ~9%
Because Zhihong also submit patch to improve the performance on for the mergeable on: http://dpdk.org/dev/patchwork/patch/15245/ ~15249. If both patch integrated, the performance drop will be much higher
Vhsot Xstats patch + Vhost mergeable on patch with mergeable on : the performance drop is around 6%
Best Regards
Lei
-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yang, Zhiyong
Sent: Thursday, August 25, 2016 5:22 PM
To: Panu Matilainen <pmatilai@redhat.com>; Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
> -----Original Message-----
> From: Panu Matilainen [mailto:pmatilai@redhat.com]
> Sent: Wednesday, August 24, 2016 8:37 PM
> To: Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu
> <yuanhan.liu@linux.intel.com>
> Cc: dev@dpdk.org; Yang, Zhiyong <zhiyong.yang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> On 08/24/2016 11:44 AM, Thomas Monjalon wrote:
> > 2016-08-24 13:46, Yuanhan Liu:
> >> On Tue, Aug 23, 2016 at 12:45:54PM +0300, Panu Matilainen wrote:
> >>>>>> Since collecting data of vhost_update_packet_xstats will have
> >>>>>> some effect on RX/TX performance, so, Setting compiling switch
> >>>>>> CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default
> in the
> >>>>> file
> >>>>>> config/common_base, if needing xstats data, you can enable it(y).
> >>>>>
> >>>>> NAK, such things need to be switchable at run-time.
> >>>>>
> >>>>> - Panu -
> >>>>
> >>>> Considering the following reasons using the compiler switch, not
> >>>> command-line at run-time.
> >>>>
> >>>> 1.Similar xstats update functions are always collecting stats
> >>>> data in the background when rx/tx are running, such as the
> >>>> physical NIC or virtio, which have no switch. Compiler switch for
> >>>> vhost pmd xstats is added as a option when performance is viewed
> >>>> as critical
> factor.
> >>>>
> >>>> 2. No data structure and API in any layer support the xstats
> >>>> update switch at run-time. Common data structure (struct
> >>>> rte_eth_dev_data) has no device-specific data member, if
> >>>> implementing enable/disable of vhost_update _packet_xstats at
> >>>> run-time, must define a
> >>>> flag(device-specific) in it, because the definition of struct
> >>>> vhost_queue in the driver code (eth_vhost_rx/eth_vhost_tx
> processing)is not visible from device perspective.
> >>>>
> >>>> 3. I tested RX/TX with v1 patch (y) as reference based on
> >>>> Intel(R)
> >>>> Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst
> >>>> mode,
> >>>> 32 packets in one RX/TX processing. Overhead of
> >>>> vhost_update_packet_xstats is less than 3% for the rx/tx
> >>>> processing. It looks that vhost_update_packet_xstats has a
> >>>> limited
> effect on performance drop.
> >>>
> >>> Well, either the performance overhead is acceptable and it should
> >>> always be on (like with physical NICs I think). Or it is not. In
> >>> which case it needs to be turnable on and off, at run-time.
> >>> Rebuilding is not an option in the world of distros.
> >>
> >> I think the less than 3% overhead is acceptable here, that I agree
> >> with Panu we should always keep it on. If someone compains it later
> >> that even 3% is too big for them, let's consider to make it be
> >> switchable at run-time. Either we could introduce a generic eth API
> >> for that, Or just introduce a vhost one if that doesn't make too
> >> much sense to other eth drivers.
> >
> > +1
> > It may have sense to introduce a generic run-time option for stats.
> >
>
> Yup, sounds good.
>
It sounds better , if DPDK can add generic API and structure to the switch of xstats update. So, any device can use it at run time if necessary.
Can we define one bit data member (xstats_update) in the data structure struct rte_eth_dev_data?
such as:
uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
dev_started : 1, /**< Device state: STARTED(1) / STOPPED(0). */
lro : 1, /**< RX LRO is ON(1) / OFF(0) */
xstats_update: 1; /**< xstats update is ON(1) / OFF(0) */
Define 3 functions:
void rte_eth_xstats_update_enable(uint8_t port_id); void rte_eth_xstats_update_disable(uint8_t port_id); int rte_eth_xstats_update_get(uint8_t port_id);
Or define two:
/* uint8_t xstats_update ; 1 on, 0 off*/ void rte_eth_xstats_update_enable(uint8_t port_id, uint8_t xstats_update); int rte_eth_xstats_update_get(uint8_t port_id);
In the struct eth_dev_ops, adding two functions to pass xstats_update to driver, because the rxq/txq can't access xstats_update directly.
So, add a xstats flag per queue data structure.
for example
struct vhost_queue {
......
Uint64_t xstats_flag;
......
};
typedef uint16_t (*eth_rx_burst_t)(void *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
typedef uint16_t (*eth_tx_burst_t)(void *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
struct eth_dev_ops {
......
eth_xstats_update_enable_t xstats_update_enable; /**< xstats ON. */
eth_xstats_update_disable_t xstats_update_disable;/**< xstats OFF. */ ......
};
--zhiyong--
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-30 3:21 ` Yao, Lei A
@ 2016-08-31 7:18 ` Yang, Zhiyong
2016-09-01 6:37 ` Yang, Zhiyong
0 siblings, 1 reply; 57+ messages in thread
From: Yang, Zhiyong @ 2016-08-31 7:18 UTC (permalink / raw)
To: dev
Cc: Panu Matilainen, Liu, Yuanhan, Thomas Monjalon, Yao, Lei A, Yang,
Zhiyong, Wang, Zhihong
Hi, ALL:
A physical NIC has a set of counters, such as
u64 prc64;
u64 prc127;
u64 prc255; etc.
but DPDK currently counts prc64 in two different ways. A physical NIC
counts prc64 including the CRC, in hardware. Virtio computes the
prc64-like counter without the CRC. This causes a conflict: when a
64-byte packet from the outer network is sent to a VM (virtio), the NIC
shows prc64 + 1, while virtio actually receives a 64 - 4 (CRC) = 60 byte
packet, so its undersize (<64) counter is incremented. Should vhost
follow the NIC's behavior or virtio's behavior?
For reference, the RFC 2819 description is:
etherStatsPkts64Octets OBJECT-TYPE
SYNTAX Counter32
UNITS "Packets"
"The total number of packets (including bad packets) received that were
64 octets in length (excluding framing bits but including FCS octets)."
> -----Original Message-----
> From: Yao, Lei A
> Sent: Tuesday, August 30, 2016 11:22 AM
> To: Xu, Qian Q <qian.q.xu@intel.com>; Yang, Zhiyong
> <zhiyong.yang@intel.com>; Panu Matilainen <pmatilai@redhat.com>;
> Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu
> <yuanhan.liu@linux.intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> Hi, Qian
>
> The test setup at my side is Vhost/VirtIO loopback with 64B packets.
>
>
> -----Original Message-----
> From: Xu, Qian Q
> Sent: Tuesday, August 30, 2016 11:03 AM
> To: Yao, Lei A <lei.a.yao@intel.com>; Yang, Zhiyong
> <zhiyong.yang@intel.com>; Panu Matilainen <pmatilai@redhat.com>;
> Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu
> <yuanhan.liu@linux.intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> Lei
> Could you list the test setup for below findings? I think we need at least to
> check below tests for mergeable=on/off path:
> 1. Vhost/virtio loopback
> 2. PVP test : virtio-pmd IO fwd and virtio-net IPV4 fwd
>
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yao, Lei A
> Sent: Tuesday, August 30, 2016 10:46 AM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>; Panu Matilainen
> <pmatilai@redhat.com>; Thomas Monjalon
> <thomas.monjalon@6wind.com>; Yuanhan Liu <yuanhan.liu@linux.intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> Hi, Zhiyong
>
> I have tested more xstats performance drop data at my side.
> Vhost Xstats patch with mergeable on : ~3% Vhost Xstats patch with
> mergeable off : ~9%
>
> Because Zhihong also submit patch to improve the performance on for the
> mergeable on: http://dpdk.org/dev/patchwork/patch/15245/ ~15249. If
> both patch integrated, the performance drop will be much higher
> Vhsot Xstats patch + Vhost mergeable on patch with mergeable on : the
> performance drop is around 6%
>
>
> Best Regards
> Lei
>
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Yang, Zhiyong
> Sent: Thursday, August 25, 2016 5:22 PM
> To: Panu Matilainen <pmatilai@redhat.com>; Thomas Monjalon
> <thomas.monjalon@6wind.com>; Yuanhan Liu <yuanhan.liu@linux.intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
>
>
> > -----Original Message-----
> > From: Panu Matilainen [mailto:pmatilai@redhat.com]
> > Sent: Wednesday, August 24, 2016 8:37 PM
> > To: Thomas Monjalon <thomas.monjalon@6wind.com>; Yuanhan Liu
> > <yuanhan.liu@linux.intel.com>
> > Cc: dev@dpdk.org; Yang, Zhiyong <zhiyong.yang@intel.com>
> > Subject: Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
> >
> > On 08/24/2016 11:44 AM, Thomas Monjalon wrote:
> > > 2016-08-24 13:46, Yuanhan Liu:
> > >> On Tue, Aug 23, 2016 at 12:45:54PM +0300, Panu Matilainen wrote:
> > >>>>>> Since collecting data of vhost_update_packet_xstats will have
> > >>>>>> some effect on RX/TX performance, so, Setting compiling switch
> > >>>>>> CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n by default
> > in the
> > >>>>> file
> > >>>>>> config/common_base, if needing xstats data, you can enable it(y).
> > >>>>>
> > >>>>> NAK, such things need to be switchable at run-time.
> > >>>>>
> > >>>>> - Panu -
> > >>>>
> > >>>> Considering the following reasons using the compiler switch, not
> > >>>> command-line at run-time.
> > >>>>
> > >>>> 1.Similar xstats update functions are always collecting stats
> > >>>> data in the background when rx/tx are running, such as the
> > >>>> physical NIC or virtio, which have no switch. Compiler switch for
> > >>>> vhost pmd xstats is added as a option when performance is viewed
> > >>>> as critical
> > factor.
> > >>>>
> > >>>> 2. No data structure and API in any layer support the xstats
> > >>>> update switch at run-time. Common data structure (struct
> > >>>> rte_eth_dev_data) has no device-specific data member, if
> > >>>> implementing enable/disable of vhost_update _packet_xstats at
> > >>>> run-time, must define a
> > >>>> flag(device-specific) in it, because the definition of struct
> > >>>> vhost_queue in the driver code (eth_vhost_rx/eth_vhost_tx
> > processing)is not visible from device perspective.
> > >>>>
> > >>>> 3. I tested RX/TX with v1 patch (y) as reference based on
> > >>>> Intel(R)
> > >>>> Xeon(R) CPU E5-2699 v3 @ 2.30GHz, for 64byts packets in burst
> > >>>> mode,
> > >>>> 32 packets in one RX/TX processing. Overhead of
> > >>>> vhost_update_packet_xstats is less than 3% for the rx/tx
> > >>>> processing. It looks that vhost_update_packet_xstats has a
> > >>>> limited
> > effect on performance drop.
> > >>>
> > >>> Well, either the performance overhead is acceptable and it should
> > >>> always be on (like with physical NICs I think). Or it is not. In
> > >>> which case it needs to be turnable on and off, at run-time.
> > >>> Rebuilding is not an option in the world of distros.
> > >>
> > >> I think the less than 3% overhead is acceptable here, that I agree
> > >> with Panu we should always keep it on. If someone compains it later
> > >> that even 3% is too big for them, let's consider to make it be
> > >> switchable at run-time. Either we could introduce a generic eth API
> > >> for that, Or just introduce a vhost one if that doesn't make too
> > >> much sense to other eth drivers.
> > >
> > > +1
> > > It may have sense to introduce a generic run-time option for stats.
> > >
> >
> > Yup, sounds good.
> >
> It sounds better if DPDK can add a generic API and structure for switching
> xstats updates on and off, so that any device can use it at run time if
> necessary.
>
> Can we define a one-bit data member (xstats_update) in the data structure
> struct rte_eth_dev_data? Such as:
> uint8_t promiscuous : 1,   /**< RX promiscuous mode ON(1) / OFF(0). */
>         scattered_rx : 1,  /**< RX of scattered packets is ON(1) / OFF(0) */
>         all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
>         dev_started : 1,   /**< Device state: STARTED(1) / STOPPED(0). */
>         lro : 1,           /**< RX LRO is ON(1) / OFF(0) */
>         xstats_update : 1; /**< xstats update is ON(1) / OFF(0) */
>
> Define three functions:
>
> void rte_eth_xstats_update_enable(uint8_t port_id);
> void rte_eth_xstats_update_disable(uint8_t port_id);
> int rte_eth_xstats_update_get(uint8_t port_id);
>
> Or define two:
>
> /* uint8_t xstats_update; 1 on, 0 off */
> void rte_eth_xstats_update_enable(uint8_t port_id, uint8_t xstats_update);
> int rte_eth_xstats_update_get(uint8_t port_id);
>
> In struct eth_dev_ops, add two functions to pass xstats_update to the
> driver, because the rxq/txq can't access xstats_update directly.
> So, add an xstats flag to the per-queue data structure, for example:
> struct vhost_queue {
>         ......
>         uint64_t xstats_flag;
>         ......
> };
> typedef uint16_t (*eth_rx_burst_t)(void *rxq,
>                                    struct rte_mbuf **rx_pkts,
>                                    uint16_t nb_pkts);
> typedef uint16_t (*eth_tx_burst_t)(void *txq,
>                                    struct rte_mbuf **tx_pkts,
>                                    uint16_t nb_pkts);
>
> struct eth_dev_ops {
>         ......
>         eth_xstats_update_enable_t  xstats_update_enable;  /**< xstats ON. */
>         eth_xstats_update_disable_t xstats_update_disable; /**< xstats OFF. */
>         ......
> };
>
> --zhiyong--
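To make the shape of this proposal concrete, here is a minimal, self-contained sketch of the idea. The xstats_update bit, the enable helper and the per-queue flag are hypothetical names taken from the proposal above; none of this is an existing DPDK API.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the proposed run-time switch: a per-device xstats_update bit
 * that the driver copies into each queue, so the rx/tx hot path only checks
 * a local flag. These names mirror the proposal, not real DPDK code. */
struct toy_queue {
	uint64_t xstats_flag;   /* proposed per-queue flag                 */
	uint64_t size_64_pkts;  /* one example extended counter            */
};

struct toy_dev {
	uint8_t xstats_update;  /* proposed bit in struct rte_eth_dev_data */
	struct toy_queue rxq;
};

static void toy_xstats_update_enable(struct toy_dev *dev)
{
	dev->xstats_update = 1;
	dev->rxq.xstats_flag = 1;  /* the dev_ops callback would do this per queue */
}

static void toy_rx_burst(struct toy_queue *q, uint32_t pkt_len)
{
	if (q->xstats_flag && pkt_len == 64)  /* stats cost only when enabled */
		q->size_64_pkts++;
}

int main(void)
{
	struct toy_dev dev = { 0 };

	toy_rx_burst(&dev.rxq, 64);            /* disabled: nothing is counted */
	toy_xstats_update_enable(&dev);
	toy_rx_burst(&dev.rxq, 64);            /* enabled: counted             */
	printf("size_64_packets = %llu\n",
	       (unsigned long long)dev.rxq.size_64_pkts);
	return 0;
}

Whether the per-packet flag check costs anything measurable is exactly the ~3% question debated above; the sketch only shows that the counters can be toggled without rebuilding.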
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH] vhost: add pmd xstats
2016-08-31 7:18 ` Yang, Zhiyong
@ 2016-09-01 6:37 ` Yang, Zhiyong
0 siblings, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-01 6:37 UTC (permalink / raw)
To: dev; +Cc: Liu, Yuanhan, Thomas Monjalon, Wang, Zhihong, Yao, Lei A
Hi, all:
> -----Original Message-----
> From: Yang, Zhiyong
> Sent: Wednesday, August 31, 2016 3:19 PM
> To: dev@dpdk.org
> Cc: Panu Matilainen <pmatilai@redhat.com>; Liu, Yuanhan
> <yuanhan.liu@intel.com>; Thomas Monjalon
> <thomas.monjalon@6wind.com>; Yao, Lei A <lei.a.yao@intel.com>; Yang,
> Zhiyong <zhiyong.yang@intel.com>; Wang, Zhihong
> <zhihong.wang@intel.com>
> Subject: RE: [dpdk-dev] [PATCH] vhost: add pmd xstats
>
> Hi, ALL:
>
> Physical NIC has a set of counters, such as
> u64 prc64;
> u64 prc127;
> u64 prc255; etc.
> But DPDK currently counts prc64 in two different ways. A physical NIC counts
> prc64 including the CRC, in hardware, while virtio computes the equivalent
> counter without the CRC. This causes a conflict: when a 64-byte packet from
> the outer network is sent to the VM (virtio), the NIC shows prc64 + 1, but
> virtio actually receives a 64 - 4 (CRC) = 60-byte packet, so its undersize
> (<64) counter is increased instead. Should vhost follow the NIC's behavior
> or virtio's behavior?
>
> According to the RFC 2819 description, for reference:
> etherStatsPkts64Octets OBJECT-TYPE
> SYNTAX Counter32
> UNITS "Packets"
> "The total number of packets (including bad packets) received that were
> 64 octets in length (excluding framing bits but including FCS octets)."
>
I asked about the requirement on dev@openvswitch.com, and Jesse Gross
<jesse@kernel.org> replied as follows:
All other stats in OVS (including flows and other counters that don't come
from hardware) count bytes without the CRC. Presumably it would be best to
adjust the physical NIC stats with DPDK to do the same.
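To illustrate the discrepancy in numbers: the only difference between the two views is the 4-byte FCS/CRC, so the same minimum-size frame lands in different buckets. A small standalone illustration; the constant and variable names here are illustrative, not from the patch.

#include <stdint.h>
#include <stdio.h>

#define FCS_LEN 4  /* Ethernet frame check sequence (CRC) length in bytes */

int main(void)
{
	uint32_t wire_len  = 64;                  /* what the NIC counts: FCS included */
	uint32_t vhost_len = wire_len - FCS_LEN;  /* what virtio/vhost sees: 60 bytes  */

	/* NIC view (RFC 2819 etherStatsPkts64Octets): FCS included -> 64-byte bucket. */
	printf("NIC:   %u bytes -> prc64++\n", (unsigned)wire_len);

	/* vhost/virtio view: no FCS on the mbuf, so the frame looks undersized. */
	printf("vhost: %u bytes -> %s\n", (unsigned)vhost_len,
	       vhost_len < 64 ? "undersize++" : "size_64++");
	return 0;
}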
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
2016-08-19 12:16 [dpdk-dev] [PATCH] vhost: add pmd xstats Zhiyong Yang
2016-08-22 7:52 ` Panu Matilainen
@ 2016-09-09 8:15 ` Zhiyong Yang
2016-09-09 8:40 ` Van Haaren, Harry
` (2 more replies)
1 sibling, 3 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-09 8:15 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, thomas.monjalon, pmatilai, Zhiyong Yang
This feature adds vhost pmd extended statistics from a per-queue perspective
for applications such as OVS.
The statistics counters are based on RFC 2819 and RFC 2863 as follows:
rx/tx_good_packets
rx/tx_total_bytes
rx/tx_dropped_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_ucast_packets
rx/tx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets;
rx/tx_size_128_to_255_packets;
rx/tx_size_256_to_511_packets;
rx/tx_size_512_to_1023_packets;
rx/tx_size_1024_to_1522_packets;
rx/tx_1523_to_max_packets;
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets;
No API is changed or added.
rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
supported,
rte_eth_xstats_get() to retrieve vhost extended statistics,
rte_eth_xstats_reset() to reset vhost extended statistics.
The usage of vhost pmd xstats is the same as for virtio pmd xstats.
For example, when the test-pmd application is running in interactive mode,
vhost pmd xstats supports the two following commands:
show port xstats all | port_id will show vhost xstats
clear port xstats all | port_id will reset vhost xstats
net/virtio pmd xstats (the function virtio_update_packet_stats) was used
as a reference when implementing the feature.
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
drivers/net/vhost/rte_eth_vhost.c | 282 +++++++++++++++++++++++++++++++++++++-
1 file changed, 281 insertions(+), 1 deletion(-)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7539cd4..8f805f3 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,6 +72,10 @@ static struct ether_addr base_eth_addr = {
}
};
+struct vhost_xstats {
+ uint64_t stat[16];
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -85,7 +89,8 @@ struct vhost_queue {
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
-};
+ struct vhost_xstats xstats;
+ };
struct pmd_internal {
char *dev_name;
@@ -127,6 +132,274 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+enum rte_vhostqueue_rxtx {
+ RTE_VHOSTQUEUE_RX = 0,
+ RTE_VHOSTQUEUE_TX = 1
+};
+
+#define RTE_ETH_VHOST_XSTATS_NAME_SIZE 64
+
+struct rte_vhost_xstats_name_off {
+ char name[RTE_ETH_VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rt]_qX_ is prepended to the name string here */
+static const struct rte_vhost_xstats_name_off rte_vhost_rxq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, rx_pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, rx_bytes)},
+ {"dropped_pkts",
+ offsetof(struct vhost_queue, missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, xstats.stat[8])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, xstats.stat[9])},
+ {"ucast_packets",
+ offsetof(struct vhost_queue, xstats.stat[10])},
+ {"undersize_errors",
+ offsetof(struct vhost_queue, xstats.stat[0])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, xstats.stat[1])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, xstats.stat[2])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, xstats.stat[3])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, xstats.stat[4])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, xstats.stat[5])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, xstats.stat[6])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, xstats.stat[7])},
+ {"errors",
+ offsetof(struct vhost_queue, xstats.stat[11])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, xstats.stat[12])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, xstats.stat[13])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, xstats.stat[14])},
+};
+
+/* [tx]_qX_ is prepended to the name string here */
+static const struct rte_vhost_xstats_name_off rte_vhost_txq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, tx_pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, tx_bytes)},
+ {"dropped_pkts",
+ offsetof(struct vhost_queue, missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, xstats.stat[8])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, xstats.stat[9])},
+ {"ucast_packets",
+ offsetof(struct vhost_queue, xstats.stat[10])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, xstats.stat[1])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, xstats.stat[2])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, xstats.stat[3])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, xstats.stat[4])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, xstats.stat[5])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, xstats.stat[6])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, xstats.stat[7])},
+ {"errors",
+ offsetof(struct vhost_queue, xstats.stat[11])},
+};
+
+#define VHOST_NB_RXQ_XSTATS (sizeof(rte_vhost_rxq_stat_strings) / \
+ sizeof(rte_vhost_rxq_stat_strings[0]))
+
+#define VHOST_NB_TXQ_XSTATS (sizeof(rte_vhost_txq_stat_strings) / \
+ sizeof(rte_vhost_txq_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vqrx = NULL;
+ struct vhost_queue *vqtx = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+ vqrx = (struct vhost_queue *)dev->data->rx_queues[i];
+ vqrx->rx_pkts = 0;
+ vqrx->rx_bytes = 0;
+ vqrx->missed_pkts = 0;
+ memset(&vqrx->xstats, 0, sizeof(vqrx->xstats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+ vqtx = (struct vhost_queue *)dev->data->tx_queues[i];
+ vqtx->tx_pkts = 0;
+ vqtx->tx_bytes = 0;
+ vqtx->missed_pkts = 0;
+ memset(&vqtx->xstats, 0, sizeof(vqtx->xstats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ unsigned int i = 0;
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (xstats_names) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vhost_queue *rxvq = dev->data->rx_queues[i];
+
+ if (!rxvq)
+ continue;
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ rte_vhost_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vhost_queue *txvq = dev->data->tx_queues[i];
+
+ if (!txvq)
+ continue;
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ rte_vhost_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+ return count;
+ }
+ return nstats;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+
+ unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vhost_queue *rxvq =
+ (struct vhost_queue *)dev->data->rx_queues[i];
+
+ if (!rxvq)
+ continue;
+
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ xstats[count].value = *(uint64_t *)(((char *)rxvq)
+ + rte_vhost_rxq_stat_strings[t].offset);
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vhost_queue *txvq =
+ (struct vhost_queue *)dev->data->tx_queues[i];
+
+ if (!txvq)
+ continue;
+
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ xstats[count].value = *(uint64_t *)(((char *)txvq)
+ + rte_vhost_txq_stat_strings[t].offset);
+ count++;
+ }
+ }
+
+ return count;
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t nb_rxtx,
+ uint16_t nb_bufs,
+ enum rte_vhostqueue_rxtx vqueue_rxtx)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct ether_addr *ea = NULL;
+ struct vhost_xstats *xstats_update = &vq->xstats;
+
+ for (i = 0; i < nb_rxtx ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ xstats_update->stat[1]++;
+
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ xstats_update->stat[index]++;
+ } else {
+ if (pkt_len < 64)
+ xstats_update->stat[0]++;
+ else if (pkt_len <= 1522)
+ xstats_update->stat[6]++;
+ else if (pkt_len > 1522)
+ xstats_update->stat[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ /* broadcast++; */
+ xstats_update->stat[8]++;
+ else
+ /* multicast++; */
+ xstats_update->stat[9]++;
+ }
+ }
+ /* non-multi/broadcast, multi/broadcast, including those
+ * that were discarded or not sent. from rfc2863
+ */
+ if (vqueue_rxtx == RTE_VHOSTQUEUE_RX) {
+ xstats_update->stat[10] = vq->rx_pkts + vq->missed_pkts
+ - (xstats_update->stat[8]
+ + xstats_update->stat[9]);
+ } else {
+ for (i = nb_rxtx; i < nb_bufs ; i++) {
+ ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ xstats_update->stat[8]++;
+ else
+ xstats_update->stat[9]++;
+ }
+ }
+ xstats_update->stat[10] = vq->tx_pkts + vq->missed_pkts
+ - (xstats_update->stat[8] + xstats_update->stat[9]);
+ }
+}
+
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -152,6 +425,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
r->rx_bytes += bufs[i]->pkt_len;
}
+ vhost_update_packet_xstats(r, bufs, nb_rx, nb_rx, RTE_VHOSTQUEUE_RX);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -182,6 +457,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_tx); i++)
r->tx_bytes += bufs[i]->pkt_len;
+ vhost_update_packet_xstats(r, bufs, nb_tx, nb_bufs, RTE_VHOSTQUEUE_TX);
+
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
out:
@@ -682,6 +959,9 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
};
static int
--
2.5.5
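For context, the counters added by this patch are consumed through the generic ethdev xstats calls listed in the commit message. A minimal polling sketch, assuming the DPDK 16.07-era API and a valid, started port (error handling trimmed); roughly what test-pmd's "show port xstats" does under the hood:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Print every xstat a port exposes, e.g. rx_q0_size_64_packets. */
static void
dump_xstats(uint8_t port_id)
{
	int i, n;
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;

	/* First call with NULL asks only for the number of counters. */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names && values &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, values, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n", names[i].name,
			       values[i].value);
	}
	free(names);
	free(values);
}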
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
2016-09-09 8:15 ` [dpdk-dev] [PATCH v2] net/vhost: " Zhiyong Yang
@ 2016-09-09 8:40 ` Van Haaren, Harry
2016-09-09 8:54 ` Yang, Zhiyong
2016-09-14 6:20 ` Yuanhan Liu
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2 siblings, 1 reply; 57+ messages in thread
From: Van Haaren, Harry @ 2016-09-09 8:40 UTC (permalink / raw)
To: Yang, Zhiyong, dev
Cc: yuanhan.liu, thomas.monjalon, mailto:pmatilai, Yang, Zhiyong
Hi Zhiyong,
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhiyong Yang
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
>
> +struct vhost_xstats {
> + uint64_t stat[16];
> +};
Perhaps we could create an enum to access the stat array?
enum VHOST_STAT {
...
VHOST_STAT_64_PKT,
...
};
> + {"broadcast_packets",
> + offsetof(struct vhost_queue, xstats.stat[8])},
I think the "magic number" 8 here could come from the enum, and it would be clearer which statistic is being accessed.
> + if (pkt_len == 64) {
> + xstats_update->stat[1]++;
Similarly, incrementing the counters would be more representative if it looked like
xstats_update->stat[VHOST_STAT_64_PKT]++; /* or similar */
-Harry
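A minimal sketch of the layout Harry is suggesting; the enumerator names here are only illustrative, and the v3 patch later in this thread settles on its own names and adds the error counters omitted here:

#include <stdint.h>

/* Name the slots of the stat array instead of indexing with magic numbers. */
enum vhost_stat_idx {
	VHOST_STAT_UNDERSIZE_PKT = 0,
	VHOST_STAT_64_PKT,
	VHOST_STAT_65_TO_127_PKT,
	VHOST_STAT_128_TO_255_PKT,
	VHOST_STAT_256_TO_511_PKT,
	VHOST_STAT_512_TO_1023_PKT,
	VHOST_STAT_1024_TO_1522_PKT,
	VHOST_STAT_1523_TO_MAX_PKT,
	VHOST_STAT_BROADCAST_PKT,
	VHOST_STAT_MULTICAST_PKT,
	/* error counters would follow here */
	VHOST_STAT_MAX,
};

struct vhost_xstats {
	uint64_t stat[VHOST_STAT_MAX];
};

/* The update sites and the name table would then read, for example:
 *   xstats->stat[VHOST_STAT_64_PKT]++;
 *   offsetof(struct vhost_queue, xstats.stat[VHOST_STAT_BROADCAST_PKT])
 */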
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
2016-09-09 8:40 ` Van Haaren, Harry
@ 2016-09-09 8:54 ` Yang, Zhiyong
0 siblings, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-09 8:54 UTC (permalink / raw)
To: Van Haaren, Harry, dev; +Cc: yuanhan.liu, thomas.monjalon
Hi, Harry:
Your idea looks very good.
Thanks
--Zhiyong
> -----Original Message-----
> From: Van Haaren, Harry
> Sent: Friday, September 9, 2016 4:41 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>; dev@dpdk.org
> Cc: yuanhan.liu@linux.intel.com; thomas.monjalon@6wind.com;
> "mailto:pmatilai"@redhat.com; Yang, Zhiyong <zhiyong.yang@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
>
> Hi Zhiyong,
>
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhiyong Yang
> > To: dev@dpdk.org
> > Subject: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
> >
> > +struct vhost_xstats {
> > + uint64_t stat[16];
> > +};
>
> Perhaps we could create an enum to access the stat array?
>
> enum VHOST_STAT {
> ...
> VHOST_STAT_64_PKT,
> ...
> };
>
> > + {"broadcast_packets",
> > + offsetof(struct vhost_queue, xstats.stat[8])},
>
> I think the "magic number" 8 here could come from the enum, and it would be
> clearer which statistic is being accessed.
>
> > + if (pkt_len == 64) {
> > + xstats_update->stat[1]++;
>
> Similarly, incrementing the counters would be more representative if it
> looked like
>
> xstats_update->stat[VHOST_STAT_64_PKT]++; /* or similar */
>
> -Harry
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
2016-09-09 8:15 ` [dpdk-dev] [PATCH v2] net/vhost: " Zhiyong Yang
2016-09-09 8:40 ` Van Haaren, Harry
@ 2016-09-14 6:20 ` Yuanhan Liu
2016-09-14 7:43 ` Yang, Zhiyong
2016-09-14 8:30 ` Yang, Zhiyong
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2 siblings, 2 replies; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-14 6:20 UTC (permalink / raw)
To: Zhiyong Yang; +Cc: dev, thomas.monjalon, pmatilai
On Fri, Sep 09, 2016 at 04:15:27PM +0800, Zhiyong Yang wrote:
> +struct vhost_xstats {
> + uint64_t stat[16];
> +};
> +
> struct vhost_queue {
> int vid;
> rte_atomic32_t allow_queuing;
> @@ -85,7 +89,8 @@ struct vhost_queue {
> uint64_t missed_pkts;
> uint64_t rx_bytes;
> uint64_t tx_bytes;
I'd suggest to put those statistic counters to vhost_stats struct,
which could simplify the xstats_reset code a bit.
And please do it in two patches, one to introduce vhost_stats, another
one to add xstats.
> -};
> + struct vhost_xstats xstats;
> + };
A format issue here.
>
> struct pmd_internal {
> char *dev_name;
> @@ -127,6 +132,274 @@ struct rte_vhost_vring_state {
>
> static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
>
> +enum rte_vhostqueue_rxtx {
Don't use "rte_" prefix for internal function, vars, etc. "rte_" prefix is
reserved for those that will be exported for public use.
> + RTE_VHOSTQUEUE_RX = 0,
> + RTE_VHOSTQUEUE_TX = 1
> +};
> +
> +#define RTE_ETH_VHOST_XSTATS_NAME_SIZE 64
ditto.
> +
> +struct rte_vhost_xstats_name_off {
ditto.
> + char name[RTE_ETH_VHOST_XSTATS_NAME_SIZE];
> + uint64_t offset;
> +};
> +
> +/* [rt]_qX_ is prepended to the name string here */
> +static void
> +vhost_dev_xstats_reset(struct rte_eth_dev *dev)
> +{
> + struct vhost_queue *vqrx = NULL;
> + struct vhost_queue *vqtx = NULL;
> + unsigned int i = 0;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + if (!dev->data->rx_queues[i])
> + continue;
> + vqrx = (struct vhost_queue *)dev->data->rx_queues[i];
Unnecessary cast.
> + vqrx->rx_pkts = 0;
> + vqrx->rx_bytes = 0;
> + vqrx->missed_pkts = 0;
> + memset(&vqrx->xstats, 0, sizeof(vqrx->xstats));
> + }
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + if (!dev->data->tx_queues[i])
> + continue;
> + vqtx = (struct vhost_queue *)dev->data->tx_queues[i];
> + vqtx->tx_pkts = 0;
> + vqtx->tx_bytes = 0;
> + vqtx->missed_pkts = 0;
> + memset(&vqtx->xstats, 0, sizeof(vqtx->xstats));
> + }
> +}
> +
> +static int
> +vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
> + struct rte_eth_xstat_name *xstats_names,
> + __rte_unused unsigned int limit)
The typical way is to put __rte_unused after the key word.
> +{
> + unsigned int i = 0;
> + unsigned int t = 0;
> + int count = 0;
> + int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> + + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> +
> + if (xstats_names) {
I know you are following virtio pmd style, but you don't have to. I'd
suggest to return early for (!xstats_names) case, then we could save
one indention level for following code block.
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + struct vhost_queue *rxvq = dev->data->rx_queues[i];
> +
> + if (!rxvq)
> + continue;
> + for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> + snprintf(xstats_names[count].name,
> + sizeof(xstats_names[count].name),
> + "rx_q%u_%s", i,
> + rte_vhost_rxq_stat_strings[t].name);
> + count++;
> + }
> + }
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + struct vhost_queue *txvq = dev->data->tx_queues[i];
> +
> + if (!txvq)
> + continue;
> + for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> + snprintf(xstats_names[count].name,
> + sizeof(xstats_names[count].name),
> + "tx_q%u_%s", i,
> + rte_vhost_txq_stat_strings[t].name);
> + count++;
> + }
> + }
> + return count;
> + }
> + return nstats;
> +}
> +
> +static int
> +vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
> + unsigned int n)
> +{
> + unsigned int i;
> + unsigned int t;
> + unsigned int count = 0;
> +
> + unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> + + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> +
> + if (n < nxstats)
> + return nxstats;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + struct vhost_queue *rxvq =
> + (struct vhost_queue *)dev->data->rx_queues[i];
Unnecessary cast.
> +
> + if (!rxvq)
> + continue;
> +
> + for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> + xstats[count].value = *(uint64_t *)(((char *)rxvq)
> + + rte_vhost_rxq_stat_strings[t].offset);
> + count++;
> + }
> + }
> +
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + struct vhost_queue *txvq =
> + (struct vhost_queue *)dev->data->tx_queues[i];
> +
> + if (!txvq)
> + continue;
> +
> + for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> + xstats[count].value = *(uint64_t *)(((char *)txvq)
> + + rte_vhost_txq_stat_strings[t].offset);
> + count++;
> + }
> + }
> +
> + return count;
> +}
> +
> +static void
> +vhost_update_packet_xstats(struct vhost_queue *vq,
> + struct rte_mbuf **bufs,
> + uint16_t nb_rxtx,
> + uint16_t nb_bufs,
> + enum rte_vhostqueue_rxtx vqueue_rxtx)
> +{
> + uint32_t pkt_len = 0;
> + uint64_t i = 0;
> + uint64_t index;
> + struct ether_addr *ea = NULL;
> + struct vhost_xstats *xstats_update = &vq->xstats;
> +
> + for (i = 0; i < nb_rxtx ; i++) {
> + pkt_len = bufs[i]->pkt_len;
> + if (pkt_len == 64) {
> + xstats_update->stat[1]++;
> +
Unnecessary blank line.
> + } else if (pkt_len > 64 && pkt_len < 1024) {
> + index = (sizeof(pkt_len) * 8)
> + - __builtin_clz(pkt_len) - 5;
> + xstats_update->stat[index]++;
> + } else {
> + if (pkt_len < 64)
> + xstats_update->stat[0]++;
> + else if (pkt_len <= 1522)
> + xstats_update->stat[6]++;
> + else if (pkt_len > 1522)
> + xstats_update->stat[7]++;
> + }
> +
> + ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
> + if (is_multicast_ether_addr(ea)) {
> + if (is_broadcast_ether_addr(ea))
> + /* broadcast++; */
> + xstats_update->stat[8]++;
> + else
> + /* multicast++; */
> + xstats_update->stat[9]++;
The comment could be avoided if you define a field in the vhost_stats
struct like "broadcast" or "multicast". I don't object to the way Harry
proposed though, to use an enum to access the stat array.
> + }
> + }
> + /* non-multi/broadcast, multi/broadcast, including those
> + * that were discarded or not sent.
Hmmm, I don't follow it. You may want to reword it.
> from rfc2863
Which section and which page?
--yliu
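One note on the size-bin computation quoted above: for a 32-bit pkt_len, (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) is the position of the highest set bit, i.e. floor(log2(pkt_len)) + 1, so subtracting 5 maps 65-127 to stat[2], 128-255 to stat[3], 256-511 to stat[4] and 512-1023 to stat[5], which matches the rx/tx name tables in the patch. A quick standalone check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t lens[] = { 65, 127, 128, 255, 256, 511, 512, 1023 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		uint32_t pkt_len = lens[i];
		/* Same expression as in vhost_update_packet_xstats. */
		uint64_t index = (sizeof(pkt_len) * 8)
				 - __builtin_clz(pkt_len) - 5;

		printf("pkt_len %4u -> stat[%llu]\n", (unsigned)pkt_len,
		       (unsigned long long)index);
	}
	return 0;
}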
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
2016-09-14 6:20 ` Yuanhan Liu
@ 2016-09-14 7:43 ` Yang, Zhiyong
2016-09-18 13:16 ` Yuanhan Liu
2016-09-14 8:30 ` Yang, Zhiyong
1 sibling, 1 reply; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-14 7:43 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, thomas.monjalon, pmatilai
Hi, yuanhan:
Thanks so much for your detailed comments.
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Wednesday, September 14, 2016 2:20 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com
> Subject: Re: [PATCH v2] net/vhost: add pmd xstats
>
> On Fri, Sep 09, 2016 at 04:15:27PM +0800, Zhiyong Yang wrote:
> > +struct vhost_xstats {
> > + uint64_t stat[16];
> > +};
> > +
> > struct vhost_queue {
> > int vid;
> > rte_atomic32_t allow_queuing;
> > @@ -85,7 +89,8 @@ struct vhost_queue {
> > uint64_t missed_pkts;
> > uint64_t rx_bytes;
> > uint64_t tx_bytes;
>
> I'd suggest to put those statistic counters to vhost_stats struct, which could
> simplify the xstats_reset code a bit.
>
I considered this point when I defined it, but those statistic counters are used by the pmd stats,
not only by the pmd xstats; I'm not sure whether I can modify that code. If permitted,
I will do it as you said.
> And please do it in two patches, one to introduce vhost_stats, another one
> to add xstats.
>
Ok.
> > -};
> > + struct vhost_xstats xstats;
> > + };
>
> A format issue here.
>
Naming issue? such as struct vhost_xstats vhost_stats;?
> >
> > struct pmd_internal {
> > char *dev_name;
> > @@ -127,6 +132,274 @@ struct rte_vhost_vring_state {
> >
> > static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
> >
> > +enum rte_vhostqueue_rxtx {
>
> Don't use "rte_" prefix for internal function, vars, etc. "rte_" prefix is
> reserved for those that will be exported for public use.
>
Ok, I will modify it.
> > + RTE_VHOSTQUEUE_RX = 0,
> > + RTE_VHOSTQUEUE_TX = 1
> > +};
> > +
> > +#define RTE_ETH_VHOST_XSTATS_NAME_SIZE 64
>
> ditto.
>
Ok, I will modify it.
> > +
> > +struct rte_vhost_xstats_name_off {
>
> ditto.
>
Ok, I will modify it.
> > + char name[RTE_ETH_VHOST_XSTATS_NAME_SIZE];
> > + uint64_t offset;
> > +};
> > +
> > +/* [rt]_qX_ is prepended to the name string here */ static void
> > +vhost_dev_xstats_reset(struct rte_eth_dev *dev) {
> > + struct vhost_queue *vqrx = NULL;
> > + struct vhost_queue *vqtx = NULL;
> > + unsigned int i = 0;
> > +
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > + if (!dev->data->rx_queues[i])
> > + continue;
> > + vqrx = (struct vhost_queue *)dev->data->rx_queues[i];
>
> Unnecessary cast.
>
The definition of rx_queues is void **rx_queues; when we use it to access its
data members, maybe an explicit cast is needed, otherwise we would have to cast
every time when using it.
> > + vqrx->rx_pkts = 0;
> > + vqrx->rx_bytes = 0;
> > + vqrx->missed_pkts = 0;
> > + memset(&vqrx->xstats, 0, sizeof(vqrx->xstats));
> > + }
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > + if (!dev->data->tx_queues[i])
> > + continue;
> > + vqtx = (struct vhost_queue *)dev->data->tx_queues[i];
> > + vqtx->tx_pkts = 0;
> > + vqtx->tx_bytes = 0;
> > + vqtx->missed_pkts = 0;
> > + memset(&vqtx->xstats, 0, sizeof(vqtx->xstats));
> > + }
> > +}
> > +
> > +static int
> > +vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
> > + struct rte_eth_xstat_name *xstats_names,
> > + __rte_unused unsigned int limit)
>
> The typical way is to put __rte_unused after the key word.
>
Ok.
> > +{
> > + unsigned int i = 0;
> > + unsigned int t = 0;
> > + int count = 0;
> > + int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> > + + dev->data->nb_tx_queues *
> VHOST_NB_TXQ_XSTATS;
> > +
> > + if (xstats_names) {
>
> I know you are following virtio pmd style, but you don't have to. I'd suggest
> to return early for (!xstats_names) case, then we could save one indention
> level for following code block.
>
OK.
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > + struct vhost_queue *rxvq = dev->data-
> >rx_queues[i];
> > +
> > + if (!rxvq)
> > + continue;
> > + for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> > + snprintf(xstats_names[count].name,
> > + sizeof(xstats_names[count].name),
> > + "rx_q%u_%s", i,
> > + rte_vhost_rxq_stat_strings[t].name);
> > + count++;
> > + }
> > + }
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > + struct vhost_queue *txvq = dev->data-
> >tx_queues[i];
> > +
> > + if (!txvq)
> > + continue;
> > + for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> > + snprintf(xstats_names[count].name,
> > + sizeof(xstats_names[count].name),
> > + "tx_q%u_%s", i,
> > + rte_vhost_txq_stat_strings[t].name);
> > + count++;
> > + }
> > + }
> > + return count;
> > + }
> > + return nstats;
> > +}
> > +
> > +static int
> > +vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat
> *xstats,
> > + unsigned int n)
> > +{
> > + unsigned int i;
> > + unsigned int t;
> > + unsigned int count = 0;
> > +
> > + unsigned int nxstats = dev->data->nb_rx_queues *
> VHOST_NB_RXQ_XSTATS
> > + + dev->data->nb_tx_queues *
> VHOST_NB_TXQ_XSTATS;
> > +
> > + if (n < nxstats)
> > + return nxstats;
> > +
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > + struct vhost_queue *rxvq =
> > + (struct vhost_queue *)dev->data->rx_queues[i];
>
> Unnecessary cast.
>
Ok. I will remove it.
> > +
> > + if (!rxvq)
> > + continue;
> > +
> > + for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> > + xstats[count].value = *(uint64_t *)(((char *)rxvq)
> > + + rte_vhost_rxq_stat_strings[t].offset);
> > + count++;
> > + }
> > + }
> > +
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > + struct vhost_queue *txvq =
> > + (struct vhost_queue *)dev->data->tx_queues[i];
> > +
> > + if (!txvq)
> > + continue;
> > +
> > + for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> > + xstats[count].value = *(uint64_t *)(((char *)txvq)
> > + + rte_vhost_txq_stat_strings[t].offset);
> > + count++;
> > + }
> > + }
> > +
> > + return count;
> > +}
> > +
> > +static void
> > +vhost_update_packet_xstats(struct vhost_queue *vq,
> > + struct rte_mbuf **bufs,
> > + uint16_t nb_rxtx,
> > + uint16_t nb_bufs,
> > + enum rte_vhostqueue_rxtx vqueue_rxtx) {
> > + uint32_t pkt_len = 0;
> > + uint64_t i = 0;
> > + uint64_t index;
> > + struct ether_addr *ea = NULL;
> > + struct vhost_xstats *xstats_update = &vq->xstats;
> > +
> > + for (i = 0; i < nb_rxtx ; i++) {
> > + pkt_len = bufs[i]->pkt_len;
> > + if (pkt_len == 64) {
> > + xstats_update->stat[1]++;
> > +
> Unnecessary blank line.
>
Ok. I will remove it.
> > + } else if (pkt_len > 64 && pkt_len < 1024) {
> > + index = (sizeof(pkt_len) * 8)
> > + - __builtin_clz(pkt_len) - 5;
> > + xstats_update->stat[index]++;
> > + } else {
> > + if (pkt_len < 64)
> > + xstats_update->stat[0]++;
> > + else if (pkt_len <= 1522)
> > + xstats_update->stat[6]++;
> > + else if (pkt_len > 1522)
> > + xstats_update->stat[7]++;
> > + }
> > +
> > + ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
> > + if (is_multicast_ether_addr(ea)) {
> > + if (is_broadcast_ether_addr(ea))
> > + /* broadcast++; */
> > + xstats_update->stat[8]++;
> > + else
> > + /* multicast++; */
> > + xstats_update->stat[9]++;
>
> The comment could be avoided if you define a field in vhost_stats struct like
> "broadcast" or "multicast". I don't object the way Harry proposed tough, to
> use enum to access the stat array.
>
I agree with you and Harry and will do it like that.
> > + }
> > + }
> > + /* non-multi/broadcast, multi/broadcast, including those
> > + * that were discarded or not sent.
>
> Hmmm, I don't follow it. You may want to reword it.
>
> > from rfc2863
>
> Which section and which page?
>
The packets received are not considered "discarded", because packets are received via
memory, not by a physical NIC. This mainly concerns the counters for transmitted packets:
RFC2863 page 35 ifOutUcastPkts(non-multi/broadcast)
Page 42 ifHCOutMulticastPkts
Page 42 ifHCOutBroadcastPkts
> --yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
2016-09-14 6:20 ` Yuanhan Liu
2016-09-14 7:43 ` Yang, Zhiyong
@ 2016-09-14 8:30 ` Yang, Zhiyong
1 sibling, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-14 8:30 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, thomas.monjalon, pmatilai
Hi, Yuanhan:
Sorry, I misunderstood your comment.
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Wednesday, September 14, 2016 2:20 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com
> Subject: Re: [PATCH v2] net/vhost: add pmd xstats
> > -};
> > + struct vhost_xstats xstats;
> > + };
>
> A format issue here.
>
Ok. I understand it (an unnecessary tab) and will modify it.
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
2016-09-14 7:43 ` Yang, Zhiyong
@ 2016-09-18 13:16 ` Yuanhan Liu
2016-09-19 2:48 ` Yang, Zhiyong
0 siblings, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-18 13:16 UTC (permalink / raw)
To: Yang, Zhiyong; +Cc: dev, thomas.monjalon, pmatilai
On Wed, Sep 14, 2016 at 07:43:36AM +0000, Yang, Zhiyong wrote:
> Hi, yuanhan:
> Thanks so much for your detailed comments.
>
> > -----Original Message-----
> > From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> > Sent: Wednesday, September 14, 2016 2:20 PM
> > To: Yang, Zhiyong <zhiyong.yang@intel.com>
> > Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com
> > Subject: Re: [PATCH v2] net/vhost: add pmd xstats
> >
> > On Fri, Sep 09, 2016 at 04:15:27PM +0800, Zhiyong Yang wrote:
> > > +struct vhost_xstats {
> > > + uint64_t stat[16];
> > > +};
> > > +
> > > struct vhost_queue {
> > > int vid;
> > > rte_atomic32_t allow_queuing;
> > > @@ -85,7 +89,8 @@ struct vhost_queue {
> > > uint64_t missed_pkts;
> > > uint64_t rx_bytes;
> > > uint64_t tx_bytes;
> >
> > I'd suggest to put those statistic counters to vhost_stats struct, which could
> > simplify the xstats_reset code a bit.
> >
>
> I considered this point when I defined it, but those statistic counters are used by the pmd stats,
> not only by the pmd xstats; I'm not sure whether I can modify that code. If permitted,
> I will do it as you said.
For sure you can modify the code :) I would just suggest to do it in a
single patch, as stated before (and below).
> > > + char name[RTE_ETH_VHOST_XSTATS_NAME_SIZE];
> > > + uint64_t offset;
> > > +};
> > > +
> > > +/* [rt]_qX_ is prepended to the name string here */ static void
> > > +vhost_dev_xstats_reset(struct rte_eth_dev *dev) {
> > > + struct vhost_queue *vqrx = NULL;
> > > + struct vhost_queue *vqtx = NULL;
> > > + unsigned int i = 0;
> > > +
> > > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > > + if (!dev->data->rx_queues[i])
> > > + continue;
> > > + vqrx = (struct vhost_queue *)dev->data->rx_queues[i];
> >
> > Unnecessary cast.
> >
> The definition of rx_queues is void **rx_queues;
Yes, but rx_queues[i] has type "void *", so the cast is not necessary.
> > > + }
> > > + }
> > > + /* non-multi/broadcast, multi/broadcast, including those
> > > + * that were discarded or not sent.
> >
> > Hmmm, I don't follow it. You may want to reword it.
> >
> > > from rfc2863
> >
> > Which section and which page?
> >
> The packets received are not considered "discarded", because packets are received via
> memory, not by a physical NIC. This mainly concerns the counters for transmitted packets:
It still took me some time to understand you.
> RFC2863 page 35 ifOutUcastPkts(non-multi/broadcast)
So, by your term, unicast == non-multicast/broadcast? If so, I'd suggest
to use the term "unicast" and not something else: it just introduces confusion.
And in this case, I guess you were trying to say:
/*
* According to RFC 2863 section ifHCOutMulticastPkts, ... and ...,
* the counter "unicast", "multicast" and "broadcast" are also
* increased when packets are not transmitted successfully.
*/
Well, you might still need to reword it.
After taking a closer look at the code, I'd suggest to do the countings
like the following:
- introduce a helper function to increase the "broadcast" or "multicast"
  counter, say "count_multicast_broadcast(struct vhost_stats *stats, mbuf)".
- introduce a generic function to update the generic counters: this
  function just counts those packets that have been successfully transmitted
  or received.
  It also invokes the above helper function for multicast counting.
  It basically looks like the function vhost_update_packet_xstats,
  except that it doesn't handle those failure packets: they will be
  handled in the following part.
- since the case "counting multicast and broadcast with failed packets"
  only happens on the Tx side, we could do those countings only in
  eth_vhost_tx():
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
missed = nb_bufs - nb_tx;
/* put above comment here */
if (missed) {
for_each_mbuf(mbuf) {
count_multicast_broadcast(&vq->stats, mbuf);
}
}
- there is no need to update "stat[10]" (unicast), since it can be calculated
from other counters, meaning we could just get the right value on query.
This could save some cycles.
Feel free to phone me if you have any doubts.
--yliu
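The last point is worth a tiny illustration: because every attempted packet is either unicast, multicast or broadcast, the unicast counter can be derived when xstats are queried instead of being maintained per burst. A self-contained sketch, with field names that are illustrative rather than the PMD's:

#include <stdint.h>
#include <stdio.h>

/* Toy tx-side counters; the PMD keeps equivalents per queue. */
struct toy_stats {
	uint64_t tx_pkts;      /* successfully transmitted */
	uint64_t missed_pkts;  /* failed to transmit       */
	uint64_t broadcast;
	uint64_t multicast;
};

static uint64_t toy_tx_unicast(const struct toy_stats *s)
{
	/* RFC 2863 style: all attempted packets minus multicast/broadcast,
	 * computed only at query time, not in the hot path. */
	return s->tx_pkts + s->missed_pkts - s->broadcast - s->multicast;
}

int main(void)
{
	struct toy_stats s = { .tx_pkts = 90, .missed_pkts = 10,
			       .broadcast = 5, .multicast = 15 };

	printf("tx unicast = %llu\n", (unsigned long long)toy_tx_unicast(&s));
	return 0;
}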
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats
2016-09-18 13:16 ` Yuanhan Liu
@ 2016-09-19 2:48 ` Yang, Zhiyong
0 siblings, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-19 2:48 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, thomas.monjalon, pmatilai
Hi, Yuanhan:
Thanks a lot for your detailed comments and suggestions. I will send the v3 patch
as soon as possible.
--Zhiyong
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Sunday, September 18, 2016 9:17 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com
> Subject: Re: [PATCH v2] net/vhost: add pmd xstats
>
> On Wed, Sep 14, 2016 at 07:43:36AM +0000, Yang, Zhiyong wrote:
> > Hi, yuanhan:
> > Thanks so much for your detailed comments.
> >
> > > -----Original Message-----
> > > From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> > > Sent: Wednesday, September 14, 2016 2:20 PM
> > > To: Yang, Zhiyong <zhiyong.yang@intel.com>
> > > Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com
> > > Subject: Re: [PATCH v2] net/vhost: add pmd xstats
> > >
> > > On Fri, Sep 09, 2016 at 04:15:27PM +0800, Zhiyong Yang wrote:
> > > > +struct vhost_xstats {
> > > > + uint64_t stat[16];
> > > > +};
> > > > +
> > > > struct vhost_queue {
> > > > int vid;
> > > > rte_atomic32_t allow_queuing;
> > > > @@ -85,7 +89,8 @@ struct vhost_queue {
> > > > uint64_t missed_pkts;
> > > > uint64_t rx_bytes;
> > > > uint64_t tx_bytes;
> > >
> > > I'd suggest to put those statistic counters to vhost_stats struct,
> > > which could simplify the xstats_reset code a bit.
> > >
> >
> > I considered this point when I defined it, but those statistic counters
> > are used by the pmd stats, not only by the pmd xstats; I'm not sure whether
> > I can modify that code. If permitted, I will do it as you said.
>
> For sure you can modify the code :) I would just suggest to do it in a single
> patch, as stated before (and below).
>
Ok. I will do that.
> > > > + char name[RTE_ETH_VHOST_XSTATS_NAME_SIZE];
> > > > + uint64_t offset;
> > > > +};
> > > > +
> > > > +/* [rt]_qX_ is prepended to the name string here */ static void
> > > > +vhost_dev_xstats_reset(struct rte_eth_dev *dev) {
> > > > + struct vhost_queue *vqrx = NULL;
> > > > + struct vhost_queue *vqtx = NULL;
> > > > + unsigned int i = 0;
> > > > +
> > > > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > > > + if (!dev->data->rx_queues[i])
> > > > + continue;
> > > > + vqrx = (struct vhost_queue *)dev->data->rx_queues[i];
> > >
> > > Unnecessary cast.
> > >
> > The definition of rx_queues is void **rx_queues;
>
> Yes, but rx_queues[i] has type "void *", so the cast is not necessary.
>
I'm not sure whether a void * pointer can access a struct member directly,
or whether I need an explicit cast when using it?
> > > > + }
> > > > + }
> > > > + /* non-multi/broadcast, multi/broadcast, including those
> > > > + * that were discarded or not sent.
> > >
> > > Hmmm, I don't follow it. You may want to reword it.
> > >
> > > > from rfc2863
> > >
> > > Which section and which page?
> > >
> > The packets received are not considered "discarded", because packets are
> > received via memory, not by a physical NIC. This mainly concerns the
> > counters for transmitted packets:
>
> It still took me some time to understand you.
>
> > RFC2863 page 35 ifOutUcastPkts(non-multi/broadcast)
>
> So, by your term, unicast == non-multicast/broadcast? If so, I'd suggest to
> use the term "unicast" and not something else: it just introduces confusion.
>
You are right, non-multicast/broadcast is equal to unicast. I agree with you
and will modify it as you say.
> And in this case, I guess you were trying to say:
>
> /*
> * According to RFC 2863 section ifHCOutMulticastPkts, ... and ...,
> * the counter "unicast", "multicast" and "broadcast" are also
> * increased when packets are not transmitted successfully.
> */
>
Yes.
> Well, you might still need to reword it.
>
> After taking a closer look at the code, I'd suggest to do the countings like
> the following:
>
> - introduce a helper function to increase the "broadcast" or "multicast"
> counter, say "count_multicast_broadcast(struct vhost_stats *stats, mbuf)".
>
> - introduce a generic function to update the generic counters: this
> function just counts those packets that have been successfully transmitted
> or received.
>
> It also invokes the above helper function for multicast counting.
>
> It basically looks like the function vhost_update_packet_xstats,
> except that it doesn't handle those failure packets: they will be
> handled in the following part.
>
> - since the case "counting multicast and broadcast with failed packets"
> only happens on the Tx side, we could do those countings only in
> eth_vhost_tx():
>
> nb_tx = rte_vhost_enqueue_burst(r->vid,
> r->virtqueue_id, bufs, nb_bufs);
>
> missed = nb_bufs - nb_tx;
> /* put above comment here */
> if (missed) {
> for_each_mbuf(mbuf) {
> count_multicast_broadcast(&vq->stats, mbuf);
> }
> }
>
> - there is no need to update "stat[10]" (unicast), since it can be calculated
> from other counters, meaning we could just get the right value on query.
>
> This could save some cycles.
>
Ok, your comments are detailed and clear. I will consider removing unicast
and modifying the update functions.
> Feel free to phone me if you have any doubts.
>
> --yliu
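On the cast question above: in C, a value of type void * converts implicitly to any object pointer type on assignment, so no cast is needed there; what cannot be done is accessing a struct member through the void * itself. A minimal standalone example:

#include <stdio.h>

struct vhost_queue {
	unsigned long rx_pkts;
};

int main(void)
{
	struct vhost_queue q = { 5 };
	void *queues[1] = { &q };              /* like dev->data->rx_queues */

	/* Implicit conversion: no cast needed when assigning from void *. */
	struct vhost_queue *vq = queues[0];

	/* Member access goes through the typed pointer; something like
	 * queues[0]->rx_pkts would not compile, because void * has no members. */
	vq->rx_pkts = 0;
	printf("rx_pkts = %lu\n", vq->rx_pkts);
	return 0;
}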
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v3 0/2] net/vhost: add pmd xstats support
2016-09-09 8:15 ` [dpdk-dev] [PATCH v2] net/vhost: " Zhiyong Yang
2016-09-09 8:40 ` Van Haaren, Harry
2016-09-14 6:20 ` Yuanhan Liu
@ 2016-09-20 9:36 ` Zhiyong Yang
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct Zhiyong Yang
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats Zhiyong Yang
2 siblings, 2 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-20 9:36 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, thomas.monjalon, pmatilai, harry.van.haaren
This patch set adds the vhost pmd xstats support.
Patch 1 moves all stats counters to a new defined struct vhost_stats,
in order to manage all stats counters in a unified way.
Patch 2 adds the pmd xstats support.
Changes in v3:
1. rework the vhost_update_packet_xstats and separate it into two parts.
One function deals with the generic packets update, another one deals
with increasing the broadcast and multicast with failure packets sent
according to RFC2863 page42 ifHCOutMulticastPkts ifHCOutBroadcastPkts.
2. define enum vhost_stat_pkts to replace the magic numbers and enhance
the code readability.
3. remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
---
Zhiyong Yang (2):
net/vhost: add a new stats struct
net/vhost: add pmd xstats
drivers/net/vhost/rte_eth_vhost.c | 306 +++++++++++++++++++++++++++++++++++---
1 file changed, 286 insertions(+), 20 deletions(-)
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 0/2] net/vhost: add pmd xstats support Zhiyong Yang
@ 2016-09-20 9:36 ` Zhiyong Yang
2016-09-20 10:44 ` Yuanhan Liu
` (2 more replies)
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats Zhiyong Yang
1 sibling, 3 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-20 9:36 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, thomas.monjalon, pmatilai, harry.van.haaren, Zhiyong Yang
The patch moves all stats counters to a newly defined struct vhost_stats
as follows, in order to manage all stats counters in a unified way and
simplify the subsequent function implementation (vhost_dev_xstats_reset).
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
drivers/net/vhost/rte_eth_vhost.c | 44 +++++++++++++++++++++------------------
1 file changed, 24 insertions(+), 20 deletions(-)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 80c3f4c..9157bf1 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,6 +72,14 @@ static struct ether_addr base_eth_addr = {
}
};
+struct vhost_stats {
+ uint64_t rx_pkts;
+ uint64_t tx_pkts;
+ uint64_t missed_pkts;
+ uint64_t rx_bytes;
+ uint64_t tx_bytes;
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -80,11 +88,7 @@ struct vhost_queue {
struct rte_mempool *mb_pool;
uint8_t port;
uint16_t virtqueue_id;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
- uint64_t missed_pkts;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
+ struct vhost_stats stats;
};
struct pmd_internal {
@@ -145,11 +149,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
- r->rx_pkts += nb_rx;
+ r->stats.rx_pkts += nb_rx;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- r->rx_bytes += bufs[i]->pkt_len;
+ r->stats.rx_bytes += bufs[i]->pkt_len;
}
out:
@@ -176,11 +180,11 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
- r->tx_pkts += nb_tx;
- r->missed_pkts += nb_bufs - nb_tx;
+ r->stats.tx_pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
for (i = 0; likely(i < nb_tx); i++)
- r->tx_bytes += bufs[i]->pkt_len;
+ r->stats.tx_bytes += bufs[i]->pkt_len;
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
@@ -582,10 +586,10 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = vq->rx_pkts;
+ stats->q_ipackets[i] = vq->stats.rx_pkts;
rx_total += stats->q_ipackets[i];
- stats->q_ibytes[i] = vq->rx_bytes;
+ stats->q_ibytes[i] = vq->stats.rx_bytes;
rx_total_bytes += stats->q_ibytes[i];
}
@@ -594,11 +598,11 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- stats->q_opackets[i] = vq->tx_pkts;
- tx_missed_total += vq->missed_pkts;
+ stats->q_opackets[i] = vq->stats.tx_pkts;
+ tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
- stats->q_obytes[i] = vq->tx_bytes;
+ stats->q_obytes[i] = vq->stats.tx_bytes;
tx_total_bytes += stats->q_obytes[i];
}
@@ -619,16 +623,16 @@ eth_stats_reset(struct rte_eth_dev *dev)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- vq->rx_pkts = 0;
- vq->rx_bytes = 0;
+ vq->stats.rx_pkts = 0;
+ vq->stats.rx_bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- vq->tx_pkts = 0;
- vq->tx_bytes = 0;
- vq->missed_pkts = 0;
+ vq->stats.tx_pkts = 0;
+ vq->stats.tx_bytes = 0;
+ vq->stats.missed_pkts = 0;
}
}
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct Zhiyong Yang
@ 2016-09-20 9:36 ` Zhiyong Yang
2016-09-20 10:56 ` Yuanhan Liu
2016-09-20 11:50 ` Yuanhan Liu
1 sibling, 2 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-20 9:36 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, thomas.monjalon, pmatilai, harry.van.haaren, Zhiyong Yang
This feature adds vhost pmd extended statistics from a per-queue perspective
in order to meet the requirements of applications such as OVS.
The statistics counters are based on RFC 2819 and RFC 2863 as follows:
rx/tx_good_packets
rx/tx_total_bytes
rx/tx_missed_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets;
rx/tx_size_128_to_255_packets;
rx/tx_size_256_to_511_packets;
rx/tx_size_512_to_1023_packets;
rx/tx_size_1024_to_1522_packets;
rx/tx_1523_to_max_packets;
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets;
No API is changed or added.
rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
supported,
rte_eth_xstats_get() to retrieve vhost extended statistics,
rte_eth_xstats_reset() to reset vhost extended statistics.
The usage of vhost pmd xstats is the same as for virtio pmd xstats.
For example, when the test-pmd application is running in interactive mode,
vhost pmd xstats supports the two following commands:
show port xstats all | port_id will show vhost xstats
clear port xstats all | port_id will reset vhost xstats
net/virtio pmd xstats (the function virtio_update_packet_stats) was used
as a reference when implementing the feature.
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v3:
1. rework vhost_update_packet_xstats and separate it into two parts.
One function handles the generic packet counter updates; the other one
increases the broadcast and multicast counters for packets that failed
to be sent, according to RFC 2863, page 42, ifHCOutMulticastPkts and
ifHCOutBroadcastPkts.
2. define enum vhost_xstats_pkts to replace the magic numbers and enhance
the code readability.
3. remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
drivers/net/vhost/rte_eth_vhost.c | 262 ++++++++++++++++++++++++++++++++++++++
1 file changed, 262 insertions(+)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 9157bf1..c3f404d 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -78,6 +78,7 @@ struct vhost_stats {
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
+ uint64_t xstats[16];
};
struct vhost_queue {
@@ -131,6 +132,252 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+enum vhost_xstats_pkts {
+ VHOST_UNDERSIZE_PKT = 0,
+ VHOST_64_PKT,
+ VHOST_65_TO_127_PKT,
+ VHOST_128_TO_255_PKT,
+ VHOST_256_TO_511_PKT,
+ VHOST_512_TO_1023_PKT,
+ VHOST_1024_TO_1522_PKT,
+ VHOST_1523_TO_MAX_PKT,
+ VHOST_BROADCAST_PKT,
+ VHOST_MULTICAST_PKT,
+ VHOST_ERRORS_PKT,
+ VHOST_ERRORS_FRAGMENTED,
+ VHOST_ERRORS_JABBER,
+ VHOST_UNKNOWN_PROTOCOL,
+};
+
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+ char name[VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rt]_qX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.rx_pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.rx_bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_qX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.tx_pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.tx_bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_RXQ_XSTATS (sizeof(vhost_rxq_stat_strings) / \
+ sizeof(vhost_rxq_stat_strings[0]))
+
+#define VHOST_NB_TXQ_XSTATS (sizeof(vhost_txq_stat_strings) / \
+ sizeof(vhost_txq_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vqrx = NULL;
+ struct vhost_queue *vqtx = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vqrx = dev->data->rx_queues[i];
+ if (!vqrx)
+ continue;
+ memset(&vqrx->stats, 0, sizeof(vqrx->stats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vqtx = dev->data->tx_queues[i];
+ if (!vqtx)
+ continue;
+ memset(&vqtx->stats, 0, sizeof(vqtx->stats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit __rte_unused)
+{
+ unsigned int i = 0;
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (!xstats_names)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ vhost_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ vhost_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+ return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+ unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)dev->data->rx_queues[i])
+ + vhost_rxq_stat_strings[t].offset);
+ count++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)dev->data->tx_queues[i])
+ + vhost_txq_stat_strings[t].offset);
+ count++;
+ }
+ }
+ return count;
+}
+
+static void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t begin,
+ uint16_t end)
+{
+ uint64_t i = 0;
+ struct ether_addr *ea = NULL;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = begin; i < end; i++) {
+ ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ pstats->xstats[VHOST_BROADCAST_PKT]++;
+ else
+ pstats->xstats[VHOST_MULTICAST_PKT]++;
+ }
+ }
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t nb_rxtx)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = 0; i < nb_rxtx ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ pstats->xstats[VHOST_64_PKT]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ pstats->xstats[index]++;
+ } else {
+ if (pkt_len < 64)
+ pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+ else if (pkt_len <= 1522)
+ pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+ else if (pkt_len > 1522)
+ pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+ }
+ }
+
+ vhost_count_multicast_broadcast(vq, bufs, 0, nb_rxtx);
+}
+
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -156,6 +403,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
r->stats.rx_bytes += bufs[i]->pkt_len;
}
+ vhost_update_packet_xstats(r, bufs, nb_rx);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -186,8 +435,18 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_tx); i++)
r->stats.tx_bytes += bufs[i]->pkt_len;
+ vhost_update_packet_xstats(r, bufs, nb_tx);
+
+ /* According to RFC2863 page42 section ifHCOutMulticastPkts and
+ * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
+ * are increased when packets are not transmitted successfully.
+ */
+ if ((nb_bufs - nb_tx) > 0)
+ vhost_count_multicast_broadcast(r, bufs, nb_tx, nb_bufs);
+
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -686,6 +945,9 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
};
static int
--
2.5.5
* Re: [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct Zhiyong Yang
@ 2016-09-20 10:44 ` Yuanhan Liu
2016-09-21 5:12 ` Yang, Zhiyong
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-21 10:13 ` [dpdk-dev] [PATCH v4 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2 siblings, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-20 10:44 UTC (permalink / raw)
To: Zhiyong Yang; +Cc: dev, thomas.monjalon, pmatilai, harry.van.haaren
On Tue, Sep 20, 2016 at 05:36:44PM +0800, Zhiyong Yang wrote:
> The patch moves all stats counters to a new defined struct vhost_stats
> as follows, in order to manage all stats counters in a unified way and
> simplify the subsequent function implementation(vhost_dev_xstats_reset).
>
> struct vhost_stats {
> uint64_t rx_pkts;
> uint64_t tx_pkts;
As I mentioned in my last review (you may not have noticed it), there is
no need to introduce rx_pkts and tx_pkts: only one of them will be used
for a specific queue. You could just use "pkts".
> uint64_t missed_pkts;
> uint64_t rx_bytes;
> uint64_t tx_bytes;
Ditto.
--yliu
* Re: [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats Zhiyong Yang
@ 2016-09-20 10:56 ` Yuanhan Liu
2016-09-21 7:22 ` Yang, Zhiyong
2016-09-20 11:50 ` Yuanhan Liu
1 sibling, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-20 10:56 UTC (permalink / raw)
To: Zhiyong Yang; +Cc: dev, thomas.monjalon, pmatilai, harry.van.haaren
On Tue, Sep 20, 2016 at 05:36:45PM +0800, Zhiyong Yang wrote:
> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
> index 9157bf1..c3f404d 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -78,6 +78,7 @@ struct vhost_stats {
> uint64_t missed_pkts;
> uint64_t rx_bytes;
> uint64_t tx_bytes;
> + uint64_t xstats[16];
> };
>
> struct vhost_queue {
> @@ -131,6 +132,252 @@ struct rte_vhost_vring_state {
>
> static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
>
> +enum vhost_xstats_pkts {
> + VHOST_UNDERSIZE_PKT = 0,
> + VHOST_64_PKT,
> + VHOST_65_TO_127_PKT,
> + VHOST_128_TO_255_PKT,
> + VHOST_256_TO_511_PKT,
> + VHOST_512_TO_1023_PKT,
> + VHOST_1024_TO_1522_PKT,
> + VHOST_1523_TO_MAX_PKT,
> + VHOST_BROADCAST_PKT,
> + VHOST_MULTICAST_PKT,
> + VHOST_ERRORS_PKT,
> + VHOST_ERRORS_FRAGMENTED,
> + VHOST_ERRORS_JABBER,
> + VHOST_UNKNOWN_PROTOCOL,
> +};
The definition should go before "struct vhost_stats".
And VHOST_UNKNOWN_PROTOCOL should be VHOST_XSTATS_MAX. With that, the
hardcoded number "16" could be avoided and replaced by it.
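In code terms, the suggestion amounts to something like the sketch below
(illustrative only; the member names follow the patch, and the trailing
sentinel is not a real counter, it only sizes the array):

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        /* ... the other counters from the patch ... */
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,       /* sentinel: number of counters */
};

struct vhost_stats {
        /* ... existing fields ... */
        uint64_t xstats[VHOST_XSTATS_MAX];
};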
> +
> +#define VHOST_XSTATS_NAME_SIZE 64
> +
> +struct vhost_xstats_name_off {
> + char name[VHOST_XSTATS_NAME_SIZE];
> + uint64_t offset;
> +};
> +
> +/* [rt]_qX_ is prepended to the name string here */
> +static const struct vhost_xstats_name_off vhost_rxq_stat_strings[] = {
> + {"good_packets",
> + offsetof(struct vhost_queue, stats.rx_pkts)},
> + {"total_bytes",
> + offsetof(struct vhost_queue, stats.rx_bytes)},
> + {"missed_pkts",
> + offsetof(struct vhost_queue, stats.missed_pkts)},
> + {"broadcast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
> + {"multicast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
> + {"undersize_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
> + {"size_64_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> + {"size_65_to_127_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
> + {"size_128_to_255_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
> + {"size_256_to_511_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
> + {"size_512_to_1023_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
> + {"size_1024_to_1522_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
> + {"size_1523_to_max_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
> + {"errors_with_bad_CRC",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> + {"fragmented_errors",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
> + {"jabber_errors",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
> + {"unknown_protos_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
Hmm, is "unknown_protos_packets" a valid stat? Why should we handle it
here? Besides, it will never be accessed, right? I mean,
VHOST_NB_RXQ_XSTATS makes sure of that.
> +};
> +
> +/* [tx]_qX_ is prepended to the name string here */
> +static const struct vhost_xstats_name_off vhost_txq_stat_strings[] = {
> + {"good_packets",
> + offsetof(struct vhost_queue, stats.tx_pkts)},
> + {"total_bytes",
> + offsetof(struct vhost_queue, stats.tx_bytes)},
> + {"missed_pkts",
> + offsetof(struct vhost_queue, stats.missed_pkts)},
> + {"broadcast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
> + {"multicast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
> + {"size_64_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> + {"size_65_to_127_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
> + {"size_128_to_255_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
> + {"size_256_to_511_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
> + {"size_512_to_1023_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
> + {"size_1024_to_1522_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
> + {"size_1523_to_max_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
> + {"errors_with_bad_CRC",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> +};
> +
> +#define VHOST_NB_RXQ_XSTATS (sizeof(vhost_rxq_stat_strings) / \
> + sizeof(vhost_rxq_stat_strings[0]))
> +
> +#define VHOST_NB_TXQ_XSTATS (sizeof(vhost_txq_stat_strings) / \
> + sizeof(vhost_txq_stat_strings[0]))
> +
> +static void
> +vhost_dev_xstats_reset(struct rte_eth_dev *dev)
> +{
> + struct vhost_queue *vqrx = NULL;
> + struct vhost_queue *vqtx = NULL;
I would not introduce two vars here: I'd just define one, vq.
> + unsigned int i = 0;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + vqrx = dev->data->rx_queues[i];
> + if (!vqrx)
> + continue;
> + memset(&vqrx->stats, 0, sizeof(vqrx->stats));
> + }
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + vqtx = dev->data->tx_queues[i];
> + if (!vqtx)
> + continue;
> + memset(&vqtx->stats, 0, sizeof(vqtx->stats));
> + }
> +}
> +
> +static int
> +vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
> + struct rte_eth_xstat_name *xstats_names,
> + unsigned int limit __rte_unused)
> +{
> + unsigned int i = 0;
> + unsigned int t = 0;
> + int count = 0;
> + int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> + + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> +
> + if (!xstats_names)
> + return nstats;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + if (!dev->data->rx_queues[i])
> + continue;
> + for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> + snprintf(xstats_names[count].name,
> + sizeof(xstats_names[count].name),
> + "rx_q%u_%s", i,
> + vhost_rxq_stat_strings[t].name);
> + count++;
> + }
> + }
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + if (!dev->data->tx_queues[i])
> + continue;
> + for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> + snprintf(xstats_names[count].name,
> + sizeof(xstats_names[count].name),
> + "tx_q%u_%s", i,
> + vhost_txq_stat_strings[t].name);
> + count++;
> + }
> + }
> + return count;
> +}
> +
> +static int
> +vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
> + unsigned int n)
> +{
> + unsigned int i;
> + unsigned int t;
> + unsigned int count = 0;
> + unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> + + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> +
> + if (n < nxstats)
> + return nxstats;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + if (!dev->data->rx_queues[i])
> + continue;
> + for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> + xstats[count].value =
> + *(uint64_t *)(((char *)dev->data->rx_queues[i])
> + + vhost_rxq_stat_strings[t].offset);
> + count++;
> + }
> + }
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + if (!dev->data->tx_queues[i])
> + continue;
> + for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> + xstats[count].value =
> + *(uint64_t *)(((char *)dev->data->tx_queues[i])
> + + vhost_txq_stat_strings[t].offset);
> + count++;
> + }
> + }
> + return count;
> +}
> +static void
> +vhost_count_multicast_broadcast(struct vhost_queue *vq,
> + struct rte_mbuf **bufs,
I would let this function count one mbuf, so that --->
> + uint16_t begin,
> + uint16_t end)
> +{
> + uint64_t i = 0;
> + struct ether_addr *ea = NULL;
> + struct vhost_stats *pstats = &vq->stats;
> +
> + for (i = begin; i < end; i++) {
> + ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
> + if (is_multicast_ether_addr(ea)) {
> + if (is_broadcast_ether_addr(ea))
> + pstats->xstats[VHOST_BROADCAST_PKT]++;
> + else
> + pstats->xstats[VHOST_MULTICAST_PKT]++;
> + }
> + }
> +}
> +
> +static void
> +vhost_update_packet_xstats(struct vhost_queue *vq,
> + struct rte_mbuf **bufs,
> + uint16_t nb_rxtx)
> +{
> + uint32_t pkt_len = 0;
> + uint64_t i = 0;
> + uint64_t index;
> + struct vhost_stats *pstats = &vq->stats;
> +
> + for (i = 0; i < nb_rxtx ; i++) {
> + pkt_len = bufs[i]->pkt_len;
> + if (pkt_len == 64) {
> + pstats->xstats[VHOST_64_PKT]++;
> + } else if (pkt_len > 64 && pkt_len < 1024) {
> + index = (sizeof(pkt_len) * 8)
> + - __builtin_clz(pkt_len) - 5;
> + pstats->xstats[index]++;
> + } else {
> + if (pkt_len < 64)
> + pstats->xstats[VHOST_UNDERSIZE_PKT]++;
> + else if (pkt_len <= 1522)
> + pstats->xstats[VHOST_1024_TO_1522_PKT]++;
> + else if (pkt_len > 1522)
> + pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
> + }
... you could put it here, and save the extra for() loop introduced by
vhost_count_multicast_broadcast().
BTW, I would use a simpler variable name, say "count", rather than "nb_rxtx".
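Roughly, the suggested restructuring would look like the sketch below
(illustrative only; the later revisions keep a small per-mbuf helper rather
than open-coding the address check, but the effect is the same):

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        struct vhost_stats *pstats = &vq->stats;
        struct ether_addr *ea;
        uint32_t pkt_len;
        uint64_t index;
        uint64_t i;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
                        /* 32 - clz(pkt_len) is one more than the position of
                         * the highest set bit (7..10 for 65..1023), so
                         * subtracting 5 lands on VHOST_65_TO_127_PKT ..
                         * VHOST_512_TO_1023_PKT.
                         */
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else if (pkt_len < 64) {
                        pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                } else if (pkt_len <= 1522) {
                        pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                } else {
                        pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }

                /* multicast/broadcast counted in the same pass */
                ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
                if (is_multicast_ether_addr(ea)) {
                        if (is_broadcast_ether_addr(ea))
                                pstats->xstats[VHOST_BROADCAST_PKT]++;
                        else
                                pstats->xstats[VHOST_MULTICAST_PKT]++;
                }
        }
}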
--yliu
* Re: [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats Zhiyong Yang
2016-09-20 10:56 ` Yuanhan Liu
@ 2016-09-20 11:50 ` Yuanhan Liu
2016-09-21 6:15 ` Yang, Zhiyong
1 sibling, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-20 11:50 UTC (permalink / raw)
To: Zhiyong Yang; +Cc: dev, thomas.monjalon, pmatilai, harry.van.haaren
On Tue, Sep 20, 2016 at 05:36:45PM +0800, Zhiyong Yang wrote:
> +enum vhost_xstats_pkts {
> + VHOST_UNDERSIZE_PKT = 0,
> + VHOST_64_PKT,
> + VHOST_65_TO_127_PKT,
> + VHOST_128_TO_255_PKT,
> + VHOST_256_TO_511_PKT,
> + VHOST_512_TO_1023_PKT,
> + VHOST_1024_TO_1522_PKT,
> + VHOST_1523_TO_MAX_PKT,
> + VHOST_BROADCAST_PKT,
> + VHOST_MULTICAST_PKT,
Another thing I noted is that you dropped the unicast counting here. The
comment I gave in my last email was that you don't have to update it
every time we rx/tx packets; instead, you could calculate it on query
(in vhost_dev_xstats_get). I was not asking you to drop it.
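Put differently, the unicast counter can be derived once at query time from
counters that are already maintained; a sketch of the idea, using the names
from the patch (roughly what the v4 patch below arrives at):

        /* in vhost_dev_xstats_get(), for an RX queue: */
        vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                   + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        /*
         * For a TX queue, missed_pkts is added to pkts first, because the
         * RFC 2863 out-counters also cover packets that were not
         * successfully transmitted.
         */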
--yliu
* Re: [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct
2016-09-20 10:44 ` Yuanhan Liu
@ 2016-09-21 5:12 ` Yang, Zhiyong
0 siblings, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-21 5:12 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, thomas.monjalon, pmatilai, Van Haaren, Harry
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Tuesday, September 20, 2016 6:44 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>
> Subject: Re: [PATCH v3 1/2] net/vhost: add a new stats struct
>
> On Tue, Sep 20, 2016 at 05:36:44PM +0800, Zhiyong Yang wrote:
> > The patch moves all stats counters to a new defined struct vhost_stats
> > as follows, in order to manage all stats counters in a unified way and
> > simplify the subsequent function
> implementation(vhost_dev_xstats_reset).
> >
> > struct vhost_stats {
> > uint64_t rx_pkts;
> > uint64_t tx_pkts;
>
> As I mentioned in my last review (you may not have noticed it), there is no
> need to introduce rx_pkts and tx_pkts: only one of them will be used for a
> specific queue. You could just use "pkts".
>
>
> > uint64_t missed_pkts;
> > uint64_t rx_bytes;
> > uint64_t tx_bytes;
>
> Ditto.
OK, you are right. The definition after modification will be:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
>
> --yliu
* Re: [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats
2016-09-20 11:50 ` Yuanhan Liu
@ 2016-09-21 6:15 ` Yang, Zhiyong
0 siblings, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-21 6:15 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, thomas.monjalon, pmatilai, Van Haaren, Harry
Hi, Yuanhan:
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Tuesday, September 20, 2016 7:50 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>
> Subject: Re: [PATCH v3 2/2] net/vhost: add pmd xstats
>
> On Tue, Sep 20, 2016 at 05:36:45PM +0800, Zhiyong Yang wrote:
> > +enum vhost_xstats_pkts {
> > + VHOST_UNDERSIZE_PKT = 0,
> > + VHOST_64_PKT,
> > + VHOST_65_TO_127_PKT,
> > + VHOST_128_TO_255_PKT,
> > + VHOST_256_TO_511_PKT,
> > + VHOST_512_TO_1023_PKT,
> > + VHOST_1024_TO_1522_PKT,
> > + VHOST_1523_TO_MAX_PKT,
> > + VHOST_BROADCAST_PKT,
> > + VHOST_MULTICAST_PKT,
>
> Another thing I noted is that you dropped the unicast counting here. The
> comment I gave in my last email was that you don't have to update it
> every time we rx/tx packets; instead, you could calculate it on query
> (in vhost_dev_xstats_get). I was not asking you to drop it.
>
OK, sorry for the misunderstanding; I will restore it in the next v4 patch
as you suggested.
> --yliu
* Re: [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats
2016-09-20 10:56 ` Yuanhan Liu
@ 2016-09-21 7:22 ` Yang, Zhiyong
0 siblings, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-21 7:22 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, thomas.monjalon, pmatilai, Van Haaren, Harry
Hi, Yuanhan:
I will address your comments in the v4 patch.
Thanks
--Zhiyong
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Tuesday, September 20, 2016 6:57 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; thomas.monjalon@6wind.com; pmatilai@redhat.com;
> Van Haaren, Harry <harry.van.haaren@intel.com>
> Subject: Re: [PATCH v3 2/2] net/vhost: add pmd xstats
>
> On Tue, Sep 20, 2016 at 05:36:45PM +0800, Zhiyong Yang wrote:
> > diff --git a/drivers/net/vhost/rte_eth_vhost.c
> > b/drivers/net/vhost/rte_eth_vhost.c
> > index 9157bf1..c3f404d 100644
> > --- a/drivers/net/vhost/rte_eth_vhost.c
> > +++ b/drivers/net/vhost/rte_eth_vhost.c
> > @@ -78,6 +78,7 @@ struct vhost_stats {
> > uint64_t missed_pkts;
> > uint64_t rx_bytes;
> > uint64_t tx_bytes;
> > + uint64_t xstats[16];
> > };
> >
> > struct vhost_queue {
> > @@ -131,6 +132,252 @@ struct rte_vhost_vring_state {
> >
> > static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
> >
> > +enum vhost_xstats_pkts {
> > + VHOST_UNDERSIZE_PKT = 0,
> > + VHOST_64_PKT,
> > + VHOST_65_TO_127_PKT,
> > + VHOST_128_TO_255_PKT,
> > + VHOST_256_TO_511_PKT,
> > + VHOST_512_TO_1023_PKT,
> > + VHOST_1024_TO_1522_PKT,
> > + VHOST_1523_TO_MAX_PKT,
> > + VHOST_BROADCAST_PKT,
> > + VHOST_MULTICAST_PKT,
> > + VHOST_ERRORS_PKT,
> > + VHOST_ERRORS_FRAGMENTED,
> > + VHOST_ERRORS_JABBER,
> > + VHOST_UNKNOWN_PROTOCOL,
> > +};
>
> The definition should go before "struct vhost_stats".
>
> And VHOST_UNKNOWN_PROTOCOL should be VHOST_XSTATS_MAX. With
> that, the hardcoded number "16" could be avoided and replaced by it.
>
Ok.
> > +
> > +#define VHOST_XSTATS_NAME_SIZE 64
> > +
> > +struct vhost_xstats_name_off {
> > + char name[VHOST_XSTATS_NAME_SIZE];
> > + uint64_t offset;
> > +};
> > +
> > +/* [rt]_qX_ is prepended to the name string here */ static const
> > +struct vhost_xstats_name_off vhost_rxq_stat_strings[] = {
> > + {"good_packets",
> > + offsetof(struct vhost_queue, stats.rx_pkts)},
> > + {"total_bytes",
> > + offsetof(struct vhost_queue, stats.rx_bytes)},
> > + {"missed_pkts",
> > + offsetof(struct vhost_queue, stats.missed_pkts)},
> > + {"broadcast_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_BROADCAST_PKT])},
> > + {"multicast_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_MULTICAST_PKT])},
> > + {"undersize_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_UNDERSIZE_PKT])},
> > + {"size_64_packets",
> > + offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> > + {"size_65_to_127_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_65_TO_127_PKT])},
> > + {"size_128_to_255_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_128_TO_255_PKT])},
> > + {"size_256_to_511_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_256_TO_511_PKT])},
> > + {"size_512_to_1023_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_512_TO_1023_PKT])},
> > + {"size_1024_to_1522_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_1024_TO_1522_PKT])},
> > + {"size_1523_to_max_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_1523_TO_MAX_PKT])},
> > + {"errors_with_bad_CRC",
> > + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> > + {"fragmented_errors",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_ERRORS_FRAGMENTED])},
> > + {"jabber_errors",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_ERRORS_JABBER])},
> > + {"unknown_protos_packets",
> > + offsetof(struct vhost_queue,
> > +stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
>
> Hmm, is "unknown_protos_packets" a valid stat? Why should we handle it
> here? Besides, it will never be accessed, right? I mean,
> VHOST_NB_RXQ_XSTATS makes sure of that.
>
Yes, unknown_protos_packets is never accessed and is always zero.
I checked the code, and the i40e driver implements a similar counter via a
NIC register. I introduced it so that vhost xstats look like those of a
physical NIC to applications, according to RFC 2863, section
ifInUnknownProtos:
"For packet-oriented interfaces, the number of packets received via
the interface which were discarded because of an unknown or
unsupported protocol. For character-oriented or fixed-length
interfaces that support protocol multiplexing the number of
transmission units received via the interface which were discarded
because of an unknown or unsupported protocol. For any interface
that does not support protocol multiplexing, this counter will always be 0."
> > +
> > +/* [tx]_qX_ is prepended to the name string here */ static const
> > +struct vhost_xstats_name_off vhost_txq_stat_strings[] = {
> > + {"good_packets",
> > + offsetof(struct vhost_queue, stats.tx_pkts)},
> > + {"total_bytes",
> > + offsetof(struct vhost_queue, stats.tx_bytes)},
> > + {"missed_pkts",
> > + offsetof(struct vhost_queue, stats.missed_pkts)},
> > + {"broadcast_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_BROADCAST_PKT])},
> > + {"multicast_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_MULTICAST_PKT])},
> > + {"size_64_packets",
> > + offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> > + {"size_65_to_127_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_65_TO_127_PKT])},
> > + {"size_128_to_255_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_128_TO_255_PKT])},
> > + {"size_256_to_511_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_256_TO_511_PKT])},
> > + {"size_512_to_1023_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_512_TO_1023_PKT])},
> > + {"size_1024_to_1522_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_1024_TO_1522_PKT])},
> > + {"size_1523_to_max_packets",
> > + offsetof(struct vhost_queue,
> stats.xstats[VHOST_1523_TO_MAX_PKT])},
> > + {"errors_with_bad_CRC",
> > + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])}, };
> > +
> > +#define VHOST_NB_RXQ_XSTATS (sizeof(vhost_rxq_stat_strings) / \
> > + sizeof(vhost_rxq_stat_strings[0]))
> > +
> > +#define VHOST_NB_TXQ_XSTATS (sizeof(vhost_txq_stat_strings) / \
> > + sizeof(vhost_txq_stat_strings[0]))
> > +
> > +static void
> > +vhost_dev_xstats_reset(struct rte_eth_dev *dev) {
> > + struct vhost_queue *vqrx = NULL;
> > + struct vhost_queue *vqtx = NULL;
>
> I would not introduce two vars here: I'd just define one, vq.
>
Ok
> > + unsigned int i = 0;
> > +
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > + vqrx = dev->data->rx_queues[i];
> > + if (!vqrx)
> > + continue;
> > + memset(&vqrx->stats, 0, sizeof(vqrx->stats));
> > + }
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > + vqtx = dev->data->tx_queues[i];
> > + if (!vqtx)
> > + continue;
> > + memset(&vqtx->stats, 0, sizeof(vqtx->stats));
> > + }
> > +}
> > +
> > +static int
> > +vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
> > + struct rte_eth_xstat_name *xstats_names,
> > + unsigned int limit __rte_unused) {
> > + unsigned int i = 0;
> > + unsigned int t = 0;
> > + int count = 0;
> > + int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> > + + dev->data->nb_tx_queues *
> VHOST_NB_TXQ_XSTATS;
> > +
> > + if (!xstats_names)
> > + return nstats;
> > +
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > + if (!dev->data->rx_queues[i])
> > + continue;
> > + for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> > + snprintf(xstats_names[count].name,
> > + sizeof(xstats_names[count].name),
> > + "rx_q%u_%s", i,
> > + vhost_rxq_stat_strings[t].name);
> > + count++;
> > + }
> > + }
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > + if (!dev->data->tx_queues[i])
> > + continue;
> > + for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> > + snprintf(xstats_names[count].name,
> > + sizeof(xstats_names[count].name),
> > + "tx_q%u_%s", i,
> > + vhost_txq_stat_strings[t].name);
> > + count++;
> > + }
> > + }
> > + return count;
> > +}
> > +
> > +static int
> > +vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat
> *xstats,
> > + unsigned int n)
> > +{
> > + unsigned int i;
> > + unsigned int t;
> > + unsigned int count = 0;
> > + unsigned int nxstats = dev->data->nb_rx_queues *
> VHOST_NB_RXQ_XSTATS
> > + + dev->data->nb_tx_queues *
> VHOST_NB_TXQ_XSTATS;
> > +
> > + if (n < nxstats)
> > + return nxstats;
> > +
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > + if (!dev->data->rx_queues[i])
> > + continue;
> > + for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> > + xstats[count].value =
> > + *(uint64_t *)(((char *)dev->data-
> >rx_queues[i])
> > + + vhost_rxq_stat_strings[t].offset);
> > + count++;
> > + }
> > + }
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > + if (!dev->data->tx_queues[i])
> > + continue;
> > + for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> > + xstats[count].value =
> > + *(uint64_t *)(((char *)dev->data-
> >tx_queues[i])
> > + + vhost_txq_stat_strings[t].offset);
> > + count++;
> > + }
> > + }
> > + return count;
> > +}
>
> > +static void
> > +vhost_count_multicast_broadcast(struct vhost_queue *vq,
> > + struct rte_mbuf **bufs,
>
> I would let this function count one mbuf, so that --->
>
> > + uint16_t begin,
> > + uint16_t end)
> > +{
> > + uint64_t i = 0;
> > + struct ether_addr *ea = NULL;
> > + struct vhost_stats *pstats = &vq->stats;
> > +
> > + for (i = begin; i < end; i++) {
> > + ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
> > + if (is_multicast_ether_addr(ea)) {
> > + if (is_broadcast_ether_addr(ea))
> > + pstats->xstats[VHOST_BROADCAST_PKT]++;
> > + else
> > + pstats->xstats[VHOST_MULTICAST_PKT]++;
> > + }
> > + }
> > +}
> > +
> > +static void
> > +vhost_update_packet_xstats(struct vhost_queue *vq,
> > + struct rte_mbuf **bufs,
> > + uint16_t nb_rxtx)
> > +{
> > + uint32_t pkt_len = 0;
> > + uint64_t i = 0;
> > + uint64_t index;
> > + struct vhost_stats *pstats = &vq->stats;
> > +
> > + for (i = 0; i < nb_rxtx ; i++) {
> > + pkt_len = bufs[i]->pkt_len;
> > + if (pkt_len == 64) {
> > + pstats->xstats[VHOST_64_PKT]++;
> > + } else if (pkt_len > 64 && pkt_len < 1024) {
> > + index = (sizeof(pkt_len) * 8)
> > + - __builtin_clz(pkt_len) - 5;
> > + pstats->xstats[index]++;
> > + } else {
> > + if (pkt_len < 64)
> > + pstats->xstats[VHOST_UNDERSIZE_PKT]++;
> > + else if (pkt_len <= 1522)
> > + pstats-
> >xstats[VHOST_1024_TO_1522_PKT]++;
> > + else if (pkt_len > 1522)
> > + pstats-
> >xstats[VHOST_1523_TO_MAX_PKT]++;
> > + }
>
> ... you could put it here, and save the extra for() loop introduced by
> vhost_count_multicast_broadcast().
>
> BTW, I would use a simpler variable name, say "count", rather than "nb_rxtx".
Ok.
>
> --yliu
* [dpdk-dev] [PATCH v4 0/2] net/vhost: add pmd xstats support
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct Zhiyong Yang
2016-09-20 10:44 ` Yuanhan Liu
@ 2016-09-21 10:05 ` Zhiyong Yang
2016-09-21 10:05 ` Zhiyong Yang
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 2/2] net/vhost: add pmd xstats Zhiyong Yang
2016-09-21 10:13 ` [dpdk-dev] [PATCH v4 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2 siblings, 2 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-21 10:05 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, harry.van.haaren, thomas.monjalon, pmatilai
Patch 1 moves all stats counters to a newly defined struct vhost_stats,
in order to manage all stats counters in a consistent way.
Changes in v4:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
Before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
Patch 2 adds the pmd xstats support.
Changes in v4:
1. Add a member VHOST_XSTATS_MAX to enum vhost_xstats_pkts, so we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. Restore unicast_packets and update it in the function
vhost_dev_xstats_get.
3. Move the loop out of the function vhost_count_multicast_broadcast in
order to reduce the computation.
Changes in v3:
1. Rework vhost_update_packet_xstats and separate it into two parts: one
function handles the generic packet-count update, while the other
increments the broadcast and multicast counters for packets that failed
to be sent, according to RFC 2863, page 42, ifHCOutMulticastPkts and
ifHCOutBroadcastPkts.
2. Define enum vhost_stat_pkts to replace the magic numbers and improve
code readability.
3. Remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. Remove the compiling switch.
2. Fix two code bugs.
---
Zhiyong Yang (2):
net/vhost: add a new defined stats struct
net/vhost: add pmd xstats
drivers/net/vhost/rte_eth_vhost.c | 316 +++++++++++++++++++++++++++++++++++---
1 file changed, 296 insertions(+), 20 deletions(-)
--
2.5.5
* [dpdk-dev] [PATCH v4 0/2] net/vhost: add pmd xstats support
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 0/2] net/vhost: add pmd xstats support Zhiyong Yang
@ 2016-09-21 10:05 ` Zhiyong Yang
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 2/2] net/vhost: add pmd xstats Zhiyong Yang
1 sibling, 0 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-21 10:05 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, harry.van.haaren, thomas.monjalon, pmatilai, Zhiyong Yang
Subject: [PATCH v4 1/2] net/vhost: add a new defined stats struct
This patch moves all stats counters to a newly defined struct vhost_stats,
as follows, in order to manage all stats counters in a unified way and to
simplify the subsequent function implementation (vhost_dev_xstats_reset).
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v4:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
drivers/net/vhost/rte_eth_vhost.c | 42 ++++++++++++++++++++-------------------
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 80c3f4c..7b989ec 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,6 +72,12 @@ static struct ether_addr base_eth_addr = {
}
};
+struct vhost_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t missed_pkts;
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -80,11 +86,7 @@ struct vhost_queue {
struct rte_mempool *mb_pool;
uint8_t port;
uint16_t virtqueue_id;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
- uint64_t missed_pkts;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
+ struct vhost_stats stats;
};
struct pmd_internal {
@@ -145,11 +147,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
- r->rx_pkts += nb_rx;
+ r->stats.pkts += nb_rx;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- r->rx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
}
out:
@@ -176,11 +178,11 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
- r->tx_pkts += nb_tx;
- r->missed_pkts += nb_bufs - nb_tx;
+ r->stats.pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
for (i = 0; likely(i < nb_tx); i++)
- r->tx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
@@ -582,10 +584,10 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = vq->rx_pkts;
+ stats->q_ipackets[i] = vq->stats.pkts;
rx_total += stats->q_ipackets[i];
- stats->q_ibytes[i] = vq->rx_bytes;
+ stats->q_ibytes[i] = vq->stats.bytes;
rx_total_bytes += stats->q_ibytes[i];
}
@@ -594,11 +596,11 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- stats->q_opackets[i] = vq->tx_pkts;
- tx_missed_total += vq->missed_pkts;
+ stats->q_opackets[i] = vq->stats.pkts;
+ tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
- stats->q_obytes[i] = vq->tx_bytes;
+ stats->q_obytes[i] = vq->stats.bytes;
tx_total_bytes += stats->q_obytes[i];
}
@@ -619,16 +621,16 @@ eth_stats_reset(struct rte_eth_dev *dev)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- vq->rx_pkts = 0;
- vq->rx_bytes = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- vq->tx_pkts = 0;
- vq->tx_bytes = 0;
- vq->missed_pkts = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ vq->stats.missed_pkts = 0;
}
}
--
2.5.5
* [dpdk-dev] [PATCH v4 2/2] net/vhost: add pmd xstats
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-21 10:05 ` Zhiyong Yang
@ 2016-09-21 10:05 ` Zhiyong Yang
2016-09-21 10:57 ` Yuanhan Liu
1 sibling, 1 reply; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-21 10:05 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, harry.van.haaren, thomas.monjalon, pmatilai, Zhiyong Yang
This feature adds vhost pmd extended statistics on a per-queue basis,
in order to meet the requirements of applications such as OVS.
The statistics counters are based on RFC 2819 and RFC 2863 as follows:
rx/tx_good_packets
rx/tx_total_bytes
rx/tx_missed_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_unicast_packets
rx/tx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets;
rx/tx_size_128_to_255_packets;
rx/tx_size_256_to_511_packets;
rx/tx_size_512_to_1023_packets;
rx/tx_size_1024_to_1522_packets;
rx/tx_1523_to_max_packets;
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets;
No API is changed or added.
rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
supported,
rte_eth_xstats_get() to retrieve vhost extended statistics,
rte_eth_xstats_reset() to reset vhost extended statistics.
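For reference, a minimal sketch of how an application could read these
counters through the generic ethdev calls (error handling omitted;
dump_vhost_xstats is just an illustrative helper name, not part of the
patch):

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
dump_vhost_xstats(uint8_t port_id)
{
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *values;
        int i, len;

        /* first call with NULL returns the required number of entries */
        len = rte_eth_xstats_get_names(port_id, NULL, 0);
        names = malloc(sizeof(*names) * len);
        values = malloc(sizeof(*values) * len);

        rte_eth_xstats_get_names(port_id, names, len);
        rte_eth_xstats_get(port_id, values, len);

        /* ids are sequential for this driver, so index i matches names[i] */
        for (i = 0; i < len; i++)
                printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);

        rte_eth_xstats_reset(port_id);  /* start counting from zero again */

        free(names);
        free(values);
}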
The usage of vhost pmd xstats is the same as for virtio pmd xstats.
For example, when the test-pmd application is running in interactive mode,
vhost pmd xstats supports the two following commands:
show port xstats all | port_id will show vhost xstats
clear port xstats all | port_id will reset vhost xstats
The net/virtio pmd xstats code (the function virtio_update_packet_stats)
was used as a reference when implementing this feature.
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v4:
1. Add a member VHOST_XSTATS_MAX to enum vhost_xstats_pkts, so we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. Restore unicast_packets and update it in the function
vhost_dev_xstats_get.
3. Move the loop out of the function vhost_count_multicast_broadcast in
order to reduce the computation.
Changes in v3:
1. Rework vhost_update_packet_xstats and separate it into two parts: one
function handles the generic packet-count update, while the other
increments the broadcast and multicast counters for packets that failed
to be sent, according to RFC 2863, page 42, ifHCOutMulticastPkts and
ifHCOutBroadcastPkts.
2. Define enum vhost_stat_pkts to replace the magic numbers and improve
code readability.
3. Remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. Remove the compiling switch.
2. Fix two code bugs.
drivers/net/vhost/rte_eth_vhost.c | 274 ++++++++++++++++++++++++++++++++++++++
1 file changed, 274 insertions(+)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7b989ec..9ab47d1 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,10 +72,30 @@ static struct ether_addr base_eth_addr = {
}
};
+enum vhost_xstats_pkts {
+ VHOST_UNDERSIZE_PKT = 0,
+ VHOST_64_PKT,
+ VHOST_65_TO_127_PKT,
+ VHOST_128_TO_255_PKT,
+ VHOST_256_TO_511_PKT,
+ VHOST_512_TO_1023_PKT,
+ VHOST_1024_TO_1522_PKT,
+ VHOST_1523_TO_MAX_PKT,
+ VHOST_BROADCAST_PKT,
+ VHOST_MULTICAST_PKT,
+ VHOST_UNICAST_PKT,
+ VHOST_ERRORS_PKT,
+ VHOST_ERRORS_FRAGMENTED,
+ VHOST_ERRORS_JABBER,
+ VHOST_UNKNOWN_PROTOCOL,
+ VHOST_XSTATS_MAX,
+};
+
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
+ uint64_t xstats[VHOST_XSTATS_MAX];
};
struct vhost_queue {
@@ -129,6 +149,245 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+ char name[VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rt]_qX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_qX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_RXQ_XSTATS (sizeof(vhost_rxq_stat_strings) / \
+ sizeof(vhost_rxq_stat_strings[0]))
+
+#define VHOST_NB_TXQ_XSTATS (sizeof(vhost_txq_stat_strings) / \
+ sizeof(vhost_txq_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vq = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit __rte_unused)
+{
+ unsigned int i = 0;
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (!xstats_names)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ vhost_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ vhost_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+ return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+ struct vhost_queue *vq = NULL;
+ unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)vq)
+ + vhost_rxq_stat_strings[t].offset);
+ count++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ + vq->stats.missed_pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)vq)
+ + vhost_txq_stat_strings[t].offset);
+ count++;
+ }
+ }
+ return count;
+}
+
+static inline void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t count)
+{
+ struct ether_addr *ea = NULL;
+ struct vhost_stats *pstats = &vq->stats;
+
+ ea = rte_pktmbuf_mtod(bufs[count], struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ pstats->xstats[VHOST_BROADCAST_PKT]++;
+ else
+ pstats->xstats[VHOST_MULTICAST_PKT]++;
+ }
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t count)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = 0; i < count ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ pstats->xstats[VHOST_64_PKT]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ pstats->xstats[index]++;
+ } else {
+ if (pkt_len < 64)
+ pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+ else if (pkt_len <= 1522)
+ pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+ else if (pkt_len > 1522)
+ pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+ }
+ vhost_count_multicast_broadcast(vq, bufs, i);
+ }
+}
+
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -154,6 +413,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
r->stats.bytes += bufs[i]->pkt_len;
}
+ vhost_update_packet_xstats(r, bufs, nb_rx);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -184,8 +445,18 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_tx); i++)
r->stats.bytes += bufs[i]->pkt_len;
+ vhost_update_packet_xstats(r, bufs, nb_tx);
+
+ /* According to RFC2863 page42 section ifHCOutMulticastPkts and
+ * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
+ * are increased when packets are not transmitted successfully.
+ */
+ for (i = nb_tx; i < nb_bufs; i++)
+ vhost_count_multicast_broadcast(r, bufs, i);
+
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -684,6 +955,9 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
};
static int
--
2.5.5
* [dpdk-dev] [PATCH v4 1/2] net/vhost: add a new defined stats struct
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct Zhiyong Yang
2016-09-20 10:44 ` Yuanhan Liu
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 0/2] net/vhost: add pmd xstats support Zhiyong Yang
@ 2016-09-21 10:13 ` Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2 siblings, 1 reply; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-21 10:13 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, harry.van.haaren, thomas.monjalon, pmatilai, Zhiyong Yang
This patch moves all stats counters to a newly defined struct vhost_stats,
as follows, in order to manage all stats counters in a unified way and to
simplify the subsequent function implementation (vhost_dev_xstats_reset).
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v4:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
drivers/net/vhost/rte_eth_vhost.c | 42 ++++++++++++++++++++-------------------
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 80c3f4c..7b989ec 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,6 +72,12 @@ static struct ether_addr base_eth_addr = {
}
};
+struct vhost_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t missed_pkts;
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -80,11 +86,7 @@ struct vhost_queue {
struct rte_mempool *mb_pool;
uint8_t port;
uint16_t virtqueue_id;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
- uint64_t missed_pkts;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
+ struct vhost_stats stats;
};
struct pmd_internal {
@@ -145,11 +147,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
- r->rx_pkts += nb_rx;
+ r->stats.pkts += nb_rx;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- r->rx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
}
out:
@@ -176,11 +178,11 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
- r->tx_pkts += nb_tx;
- r->missed_pkts += nb_bufs - nb_tx;
+ r->stats.pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
for (i = 0; likely(i < nb_tx); i++)
- r->tx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
@@ -582,10 +584,10 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = vq->rx_pkts;
+ stats->q_ipackets[i] = vq->stats.pkts;
rx_total += stats->q_ipackets[i];
- stats->q_ibytes[i] = vq->rx_bytes;
+ stats->q_ibytes[i] = vq->stats.bytes;
rx_total_bytes += stats->q_ibytes[i];
}
@@ -594,11 +596,11 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- stats->q_opackets[i] = vq->tx_pkts;
- tx_missed_total += vq->missed_pkts;
+ stats->q_opackets[i] = vq->stats.pkts;
+ tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
- stats->q_obytes[i] = vq->tx_bytes;
+ stats->q_obytes[i] = vq->stats.bytes;
tx_total_bytes += stats->q_obytes[i];
}
@@ -619,16 +621,16 @@ eth_stats_reset(struct rte_eth_dev *dev)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- vq->rx_pkts = 0;
- vq->rx_bytes = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- vq->tx_pkts = 0;
- vq->tx_bytes = 0;
- vq->missed_pkts = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ vq->stats.missed_pkts = 0;
}
}
--
2.5.5
* Re: [dpdk-dev] [PATCH v4 2/2] net/vhost: add pmd xstats
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 2/2] net/vhost: add pmd xstats Zhiyong Yang
@ 2016-09-21 10:57 ` Yuanhan Liu
2016-09-22 1:42 ` Yang, Zhiyong
0 siblings, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-21 10:57 UTC (permalink / raw)
To: Zhiyong Yang; +Cc: dev, harry.van.haaren, thomas.monjalon, pmatilai
On Wed, Sep 21, 2016 at 06:05:55PM +0800, Zhiyong Yang wrote:
> +static inline void
> +vhost_count_multicast_broadcast(struct vhost_queue *vq,
> + struct rte_mbuf **bufs,
> + uint16_t count)
Hmm.. why not just passing "struct rte_mbuf *mbuf"?
--yliu
> +{
> + struct ether_addr *ea = NULL;
> + struct vhost_stats *pstats = &vq->stats;
> +
> + ea = rte_pktmbuf_mtod(bufs[count], struct ether_addr *);
> + if (is_multicast_ether_addr(ea)) {
> + if (is_broadcast_ether_addr(ea))
> + pstats->xstats[VHOST_BROADCAST_PKT]++;
> + else
> + pstats->xstats[VHOST_MULTICAST_PKT]++;
> + }
> +}
> +
* Re: [dpdk-dev] [PATCH v4 2/2] net/vhost: add pmd xstats
2016-09-21 10:57 ` Yuanhan Liu
@ 2016-09-22 1:42 ` Yang, Zhiyong
2016-09-22 2:09 ` Yuanhan Liu
0 siblings, 1 reply; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-22 1:42 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, Van Haaren, Harry, thomas.monjalon, pmatilai
Hi, Yuanhan:
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Wednesday, September 21, 2016 6:57 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; Van Haaren, Harry <harry.van.haaren@intel.com>;
> thomas.monjalon@6wind.com; pmatilai@redhat.com
> Subject: Re: [PATCH v4 2/2] net/vhost: add pmd xstats
>
> On Wed, Sep 21, 2016 at 06:05:55PM +0800, Zhiyong Yang wrote:
> > +static inline void
> > +vhost_count_multicast_broadcast(struct vhost_queue *vq,
> > + struct rte_mbuf **bufs,
> > + uint16_t count)
>
> Hmm.. why not just passing "struct rte_mbuf *mbuf"?
>
Passing "struct rte_mbuf *mbuf" removes one parameter, but the function is
small and frequently invoked, so I defined it as an inline function; I think
the two kinds of definition should have the same performance once inlined.
Should I modify it in the next patch?
--zhiyong
> --yliu
* Re: [dpdk-dev] [PATCH v4 2/2] net/vhost: add pmd xstats
2016-09-22 1:42 ` Yang, Zhiyong
@ 2016-09-22 2:09 ` Yuanhan Liu
0 siblings, 0 replies; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-22 2:09 UTC (permalink / raw)
To: Yang, Zhiyong; +Cc: dev, Van Haaren, Harry, thomas.monjalon, pmatilai
On Thu, Sep 22, 2016 at 01:42:14AM +0000, Yang, Zhiyong wrote:
> hi, yuanhan:
>
> > -----Original Message-----
> > From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> > Sent: Wednesday, September 21, 2016 6:57 PM
> > To: Yang, Zhiyong <zhiyong.yang@intel.com>
> > Cc: dev@dpdk.org; Van Haaren, Harry <harry.van.haaren@intel.com>;
> > thomas.monjalon@6wind.com; pmatilai@redhat.com
> > Subject: Re: [PATCH v4 2/2] net/vhost: add pmd xstats
> >
> > On Wed, Sep 21, 2016 at 06:05:55PM +0800, Zhiyong Yang wrote:
> > > +static inline void
> > > +vhost_count_multicast_broadcast(struct vhost_queue *vq,
> > > + struct rte_mbuf **bufs,
> > > + uint16_t count)
> >
> > Hmm.. why not just passing "struct rte_mbuf *mbuf"?
> >
>
> Passing "struct rte_mbuf *mbuf" can reduce one parameter. But the function is small
> and frequently invoked. So, I define it as the inline function, I think the two
> types of definitions should have the same performance for inline function.
> Should I modify it in next patch?
Yes, since it's simpler. Besides, "count" doesn't make sense, given that
you only count one mbuf here.
--yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
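[Editor's note] To make the suggestion concrete, here is a small standalone sketch
(not DPDK driver code; demo_stats and the raw MAC arrays are illustrative stand-ins
for struct vhost_stats and struct ether_addr) of the per-mbuf classification that the
v5 patch below adopts: the helper inspects one destination MAC and bumps either the
broadcast or the multicast counter, and the caller loops over the burst.

/* Standalone sketch, not driver code. Compile with: gcc -o demo demo.c */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_stats {
        uint64_t broadcast;
        uint64_t multicast;
};

static void
count_multicast_broadcast(struct demo_stats *st, const uint8_t mac[6])
{
        static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        if (mac[0] & 0x01) {                    /* group (multicast) bit set */
                if (memcmp(mac, bcast, 6) == 0)
                        st->broadcast++;
                else
                        st->multicast++;
        }
}

int
main(void)
{
        struct demo_stats st = { 0, 0 };
        const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        const uint8_t ucast[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

        count_multicast_broadcast(&st, bcast);
        count_multicast_broadcast(&st, mcast);
        count_multicast_broadcast(&st, ucast);
        printf("broadcast=%" PRIu64 " multicast=%" PRIu64 "\n",
               st.broadcast, st.multicast);
        return 0;
}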
* [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support
2016-09-21 10:13 ` [dpdk-dev] [PATCH v4 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
@ 2016-09-22 8:19 ` Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
` (2 more replies)
0 siblings, 3 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-22 8:19 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, thomas.monjalon, pmatilai, harry.van.haaren
Patch 1 moves all stats counters to a new defined struct vhost_stats,
in order to manage all stats counters in a consistent way.
Patch 2 adds the pmd xstats support.
Changes in V5:
Patch 2:
For vhost_count_multicast_broadcast, pass struct rte_mbuf *buf instead
of struct rte_mbuf **buf and remove the 3rd parameter, uint16_t count.
Changes in v4:
Patch 1:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
Before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
Patch 2:
1. Add a member VHOST_XSTATS_MAX to enum vhost_xstats_pkts, so we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. Restore unicast_packets and update it in the function
vhost_dev_xstats_get.
3. Move the loop out of the function vhost_count_multicast_broadcast in
order to reduce computation.
Changes in v3:
1. Rework vhost_update_packet_xstats and separate it into two parts:
one function handles the generic packet updates, the other increments
the broadcast and multicast counters for packets that failed to be sent,
per RFC 2863 page 42 (ifHCOutMulticastPkts, ifHCOutBroadcastPkts).
2. Define enum vhost_stat_pkts to replace the magic numbers and improve
code readability.
3. Remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
---
Zhiyong Yang (2):
net/vhost: add a new defined stats struct
net/vhost: add pmd xstats
drivers/net/vhost/rte_eth_vhost.c | 315 +++++++++++++++++++++++++++++++++++---
1 file changed, 295 insertions(+), 20 deletions(-)
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v5 1/2] net/vhost: add a new defined stats struct
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Zhiyong Yang
@ 2016-09-22 8:19 ` Zhiyong Yang
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 " Zhiyong Yang
2016-09-23 3:56 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Yuanhan Liu
2 siblings, 1 reply; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-22 8:19 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, thomas.monjalon, pmatilai, harry.van.haaren, Zhiyong Yang
The patch moves all stats counters to a new defined struct vhost_stats
as follows, in order to manage all stats counters in a unified way and
simplify the subsequent function implementation(vhost_dev_xstats_reset).
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v4:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
drivers/net/vhost/rte_eth_vhost.c | 42 ++++++++++++++++++++-------------------
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 80c3f4c..7b989ec 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,6 +72,12 @@ static struct ether_addr base_eth_addr = {
}
};
+struct vhost_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t missed_pkts;
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -80,11 +86,7 @@ struct vhost_queue {
struct rte_mempool *mb_pool;
uint8_t port;
uint16_t virtqueue_id;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
- uint64_t missed_pkts;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
+ struct vhost_stats stats;
};
struct pmd_internal {
@@ -145,11 +147,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
- r->rx_pkts += nb_rx;
+ r->stats.pkts += nb_rx;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- r->rx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
}
out:
@@ -176,11 +178,11 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
- r->tx_pkts += nb_tx;
- r->missed_pkts += nb_bufs - nb_tx;
+ r->stats.pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
for (i = 0; likely(i < nb_tx); i++)
- r->tx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
@@ -582,10 +584,10 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = vq->rx_pkts;
+ stats->q_ipackets[i] = vq->stats.pkts;
rx_total += stats->q_ipackets[i];
- stats->q_ibytes[i] = vq->rx_bytes;
+ stats->q_ibytes[i] = vq->stats.bytes;
rx_total_bytes += stats->q_ibytes[i];
}
@@ -594,11 +596,11 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- stats->q_opackets[i] = vq->tx_pkts;
- tx_missed_total += vq->missed_pkts;
+ stats->q_opackets[i] = vq->stats.pkts;
+ tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
- stats->q_obytes[i] = vq->tx_bytes;
+ stats->q_obytes[i] = vq->stats.bytes;
tx_total_bytes += stats->q_obytes[i];
}
@@ -619,16 +621,16 @@ eth_stats_reset(struct rte_eth_dev *dev)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- vq->rx_pkts = 0;
- vq->rx_bytes = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- vq->tx_pkts = 0;
- vq->tx_bytes = 0;
- vq->missed_pkts = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ vq->stats.missed_pkts = 0;
}
}
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
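[Editor's note] As the commit message states, the payoff of gathering the counters
into one struct shows up in the later xstats work: a queue's counters can then be
cleared with a single memset, as vhost_dev_xstats_reset does in patch 2. A minimal
illustrative sketch of that idea (the demo_* types are stand-ins, not the driver's
structs):

#include <stdint.h>
#include <string.h>

struct demo_vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
};

struct demo_vhost_queue {
        struct demo_vhost_stats stats;
};

/* With every counter in one struct, resetting a queue is one call, and
 * adding new counters later needs no change here. */
static void
demo_queue_stats_reset(struct demo_vhost_queue *vq)
{
        memset(&vq->stats, 0, sizeof(vq->stats));
}

int
main(void)
{
        struct demo_vhost_queue q = { { 1, 2, 3 } };

        demo_queue_stats_reset(&q);
        return q.stats.pkts == 0 ? 0 : 1;
}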
* [dpdk-dev] [PATCH v5 2/2] net/vhost: add pmd xstats
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
@ 2016-09-22 8:19 ` Zhiyong Yang
2016-09-23 3:56 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Yuanhan Liu
2 siblings, 0 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-22 8:19 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, thomas.monjalon, pmatilai, harry.van.haaren, Zhiyong Yang
This feature adds vhost pmd extended statistics from a per-queue perspective
in order to meet the requirements of applications such as OVS.
The statistics counters are based on RFC 2819 and RFC 2863 as follows:
rx/tx_good_packets
rx/tx_total_bytes
rx/tx_missed_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_unicast_packets
rx/tx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets;
rx/tx_size_128_to_255_packets;
rx/tx_size_256_to_511_packets;
rx/tx_size_512_to_1023_packets;
rx/tx_size_1024_to_1522_packets;
rx/tx_1523_to_max_packets;
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets;
No API is changed or added.
rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
supported,
rte_eth_xstats_get() to retrieve vhost extended statistics,
rte_eth_xstats_reset() to reset vhost extended statistics.
The usage of vhost pmd xstats is the same as virtio pmd xstats.
For example, when the test-pmd application is running in interactive mode,
vhost pmd xstats supports the two following commands:
show port xstats all | port_id will show vhost xstats
clear port xstats all | port_id will reset vhost xstats
net/virtio pmd xstats (the function virtio_update_packet_stats) was used
as a reference when implementing the feature.
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in V5:
For vhost_count_multicast_broadcast, pass struct rte_mbuf *buf instead
of struct rte_mbuf **buf and remove the 3rd parameter, uint16_t count.
Changes in v4:
1. Add a member VHOST_XSTATS_MAX to enum vhost_xstats_pkts, so we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. Restore unicast_packets and update it in the function
vhost_dev_xstats_get.
3. Move the loop out of the function vhost_count_multicast_broadcast in
order to reduce computation.
Changes in v3:
1. Rework vhost_update_packet_xstats and separate it into two parts:
one function handles the generic packet updates, the other increments
the broadcast and multicast counters for packets that failed to be sent,
per RFC 2863 page 42 (ifHCOutMulticastPkts, ifHCOutBroadcastPkts).
2. Define enum vhost_stat_pkts to replace the magic numbers and improve
code readability.
3. Remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
drivers/net/vhost/rte_eth_vhost.c | 273 ++++++++++++++++++++++++++++++++++++++
1 file changed, 273 insertions(+)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7b989ec..4648cea 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,10 +72,30 @@ static struct ether_addr base_eth_addr = {
}
};
+enum vhost_xstats_pkts {
+ VHOST_UNDERSIZE_PKT = 0,
+ VHOST_64_PKT,
+ VHOST_65_TO_127_PKT,
+ VHOST_128_TO_255_PKT,
+ VHOST_256_TO_511_PKT,
+ VHOST_512_TO_1023_PKT,
+ VHOST_1024_TO_1522_PKT,
+ VHOST_1523_TO_MAX_PKT,
+ VHOST_BROADCAST_PKT,
+ VHOST_MULTICAST_PKT,
+ VHOST_UNICAST_PKT,
+ VHOST_ERRORS_PKT,
+ VHOST_ERRORS_FRAGMENTED,
+ VHOST_ERRORS_JABBER,
+ VHOST_UNKNOWN_PROTOCOL,
+ VHOST_XSTATS_MAX,
+};
+
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
+ uint64_t xstats[VHOST_XSTATS_MAX];
};
struct vhost_queue {
@@ -129,6 +149,244 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+ char name[VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rt]_qX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_qX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txq_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_RXQ_XSTATS (sizeof(vhost_rxq_stat_strings) / \
+ sizeof(vhost_rxq_stat_strings[0]))
+
+#define VHOST_NB_TXQ_XSTATS (sizeof(vhost_txq_stat_strings) / \
+ sizeof(vhost_txq_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vq = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit __rte_unused)
+{
+ unsigned int i = 0;
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (!xstats_names)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ vhost_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ vhost_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+ return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+ struct vhost_queue *vq = NULL;
+ unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+ + dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)vq)
+ + vhost_rxq_stat_strings[t].offset);
+ count++;
+ }
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ + vq->stats.missed_pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+ xstats[count].value =
+ *(uint64_t *)(((char *)vq)
+ + vhost_txq_stat_strings[t].offset);
+ count++;
+ }
+ }
+ return count;
+}
+
+static inline void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+ struct rte_mbuf *mbuf)
+{
+ struct ether_addr *ea = NULL;
+ struct vhost_stats *pstats = &vq->stats;
+
+ ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ pstats->xstats[VHOST_BROADCAST_PKT]++;
+ else
+ pstats->xstats[VHOST_MULTICAST_PKT]++;
+ }
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t count)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = 0; i < count ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ pstats->xstats[VHOST_64_PKT]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ pstats->xstats[index]++;
+ } else {
+ if (pkt_len < 64)
+ pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+ else if (pkt_len <= 1522)
+ pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+ else if (pkt_len > 1522)
+ pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+ }
+ vhost_count_multicast_broadcast(vq, bufs[i]);
+ }
+}
+
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -154,6 +412,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
r->stats.bytes += bufs[i]->pkt_len;
}
+ vhost_update_packet_xstats(r, bufs, nb_rx);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -184,8 +444,18 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_tx); i++)
r->stats.bytes += bufs[i]->pkt_len;
+ vhost_update_packet_xstats(r, bufs, nb_tx);
+
+ /* According to RFC2863 page42 section ifHCOutMulticastPkts and
+ * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
+ * are increased when packets are not transmitted successfully.
+ */
+ for (i = nb_tx; i < nb_bufs; i++)
+ vhost_count_multicast_broadcast(r, bufs[i]);
+
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -684,6 +954,9 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
};
static int
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
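[Editor's note] One detail of the patch worth calling out is the size-bucket
selection: for lengths strictly between 64 and 1024 bytes,
vhost_update_packet_xstats() derives the enum index from the bit width of pkt_len
instead of a chain of comparisons. Below is a standalone sketch (not part of the
patch) that prints the mapping, assuming the enum order above where
VHOST_65_TO_127_PKT has value 2; lengths of exactly 64, under 64, and 1024 or more
are handled by the explicit branches in the patch.

/* Standalone sketch: index = (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) - 5
 * is the bit width of pkt_len minus 5, so 65..127 -> 2, 128..255 -> 3,
 * 256..511 -> 4, 512..1023 -> 5. Compile with: gcc -o buckets buckets.c */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        const uint32_t lens[] = { 65, 127, 128, 255, 256, 511, 512, 1023 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                uint32_t pkt_len = lens[i];
                unsigned int index = (sizeof(pkt_len) * 8)
                                     - __builtin_clz(pkt_len) - 5;

                printf("pkt_len=%4u -> xstats index %u\n", pkt_len, index);
        }
        return 0;
}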
* Re: [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 " Zhiyong Yang
@ 2016-09-23 3:56 ` Yuanhan Liu
2016-09-28 2:35 ` Yuanhan Liu
2 siblings, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-23 3:56 UTC (permalink / raw)
To: Zhiyong Yang; +Cc: dev, thomas.monjalon, pmatilai, harry.van.haaren
On Thu, Sep 22, 2016 at 04:19:07PM +0800, Zhiyong Yang wrote:
> Patch 1 moves all stats counters to a new defined struct vhost_stats,
> in order to manage all stats counters in a consistent way.
>
> Patch 2 adds the pmd xstats support.
Applied to dpdk-next-virtio.
Thanks.
--yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support
2016-09-23 3:56 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Yuanhan Liu
@ 2016-09-28 2:35 ` Yuanhan Liu
0 siblings, 0 replies; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-28 2:35 UTC (permalink / raw)
To: Zhiyong Yang
Cc: dev, thomas.monjalon, pmatilai, harry.van.haaren, Loftus, Ciara
On Fri, Sep 23, 2016 at 11:56:17AM +0800, Yuanhan Liu wrote:
> On Thu, Sep 22, 2016 at 04:19:07PM +0800, Zhiyong Yang wrote:
> > Patch 1 moves all stats counters to a new defined struct vhost_stats,
> > in order to manage all stats counters in a consistent way.
> >
> > Patch 2 adds the pmd xstats support.
>
> Applied to dpdk-next-virtio.
Here is a note that this patchset has been dropped from the dpdk-next-virtio tree:
the OVS team asks for per-port xstats, while this patch does per-queue xstats.
Zhiyong will send a new version to address this request shortly.
--yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v6 0/2] net/vhost: add pmd xstats support
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
@ 2016-09-28 8:33 ` Zhiyong Yang
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 " Zhiyong Yang
0 siblings, 2 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-28 8:33 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, thomas.monjalon, pmatilai, ciara.loftus, harry.van.haaren
Patch 1 moves all stats counters to a new defined struct vhost_stats,
in order to manage all stats counters in a consistent way.
Patch 2 adds the pmd xstats support from per port perspective.
Changes in V6:
Patch 2:
1. Change xstats from per queue to per port. Keep vhost consistent with
physical NIC i40e, ixgbe, etc.
2. Added the release note.
Changes in V5:
Patch 2:
For vhost_count_multicast_broadcast, pass struct rte_mbuf *buf instead
of struct rte_mbuf **buf and remove the 3rd parameter, uint16_t count.
Changes in v4:
Patch 1:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
Before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
Patch 2:
1. Add a member VHOST_XSTATS_MAX to enum vhost_xstats_pkts, so we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. Restore unicast_packets and update it in the function
vhost_dev_xstats_get.
3. Move the loop out of the function vhost_count_multicast_broadcast in
order to reduce computation.
Changes in v3:
1. Rework vhost_update_packet_xstats and separate it into two parts:
one function handles the generic packet updates, the other increments
the broadcast and multicast counters for packets that failed to be sent,
per RFC 2863 page 42 (ifHCOutMulticastPkts, ifHCOutBroadcastPkts).
2. Define enum vhost_stat_pkts to replace the magic numbers and improve
code readability.
3. Remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
---
Zhiyong Yang (2):
net/vhost: add a new defined stats struct
net/vhost: add pmd xstats
doc/guides/rel_notes/release_16_11.rst | 4 +
drivers/net/vhost/rte_eth_vhost.c | 314 ++++++++++++++++++++++++++++++---
2 files changed, 298 insertions(+), 20 deletions(-)
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v6 1/2] net/vhost: add a new defined stats struct
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 0/2] net/vhost: add pmd xstats support Zhiyong Yang
@ 2016-09-28 8:33 ` Zhiyong Yang
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 " Zhiyong Yang
1 sibling, 1 reply; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-28 8:33 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, thomas.monjalon, pmatilai, ciara.loftus,
harry.van.haaren, Zhiyong Yang
The patch moves all stats counters to a new defined struct vhost_stats
as follows, in order to manage all stats counters in a unified way and
simplify the subsequent function implementation(vhost_dev_xstats_reset).
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v4:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
drivers/net/vhost/rte_eth_vhost.c | 36 +++++++++++++++++++++---------------
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7539cd4..d99d4ee 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,6 +72,12 @@ static struct ether_addr base_eth_addr = {
}
};
+struct vhost_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t missed_pkts;
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -145,11 +151,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
- r->rx_pkts += nb_rx;
+ r->stats.pkts += nb_rx;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- r->rx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
}
out:
@@ -176,11 +182,11 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
- r->tx_pkts += nb_tx;
- r->missed_pkts += nb_bufs - nb_tx;
+ r->stats.pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
for (i = 0; likely(i < nb_tx); i++)
- r->tx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
@@ -582,10 +588,10 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = vq->rx_pkts;
+ stats->q_ipackets[i] = vq->stats.pkts;
rx_total += stats->q_ipackets[i];
- stats->q_ibytes[i] = vq->rx_bytes;
+ stats->q_ibytes[i] = vq->stats.bytes;
rx_total_bytes += stats->q_ibytes[i];
}
@@ -594,11 +600,11 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- stats->q_opackets[i] = vq->tx_pkts;
- tx_missed_total += vq->missed_pkts;
+ stats->q_opackets[i] = vq->stats.pkts;
+ tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
- stats->q_obytes[i] = vq->tx_bytes;
+ stats->q_obytes[i] = vq->stats.bytes;
tx_total_bytes += stats->q_obytes[i];
}
@@ -619,16 +625,16 @@ eth_stats_reset(struct rte_eth_dev *dev)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- vq->rx_pkts = 0;
- vq->rx_bytes = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- vq->tx_pkts = 0;
- vq->tx_bytes = 0;
- vq->missed_pkts = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ vq->stats.missed_pkts = 0;
}
}
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v6 2/2] net/vhost: add pmd xstats
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
@ 2016-09-28 8:33 ` Zhiyong Yang
1 sibling, 0 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-28 8:33 UTC (permalink / raw)
To: dev
Cc: yuanhan.liu, thomas.monjalon, pmatilai, ciara.loftus,
harry.van.haaren, Zhiyong Yang
This feature adds vhost pmd extended statistics from a per-port perspective
in order to meet the requirements of applications such as OVS.
The statistics counters are based on RFC 2819 and RFC 2863 as follows:
rx/tx_good_packets
rx/tx_total_bytes
rx/tx_missed_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_unicast_packets
rx/tx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets;
rx/tx_size_128_to_255_packets;
rx/tx_size_256_to_511_packets;
rx/tx_size_512_to_1023_packets;
rx/tx_size_1024_to_1522_packets;
rx/tx_1523_to_max_packets;
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets;
No API is changed or added.
rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
supported,
rte_eth_xstats_get() to retrieve vhost extended statistics,
rte_eth_xstats_reset() to reset vhost extended statistics.
The usage of vhost pmd xstats is the same as virtio pmd xstats.
For example, when the test-pmd application is running in interactive mode,
vhost pmd xstats supports the two following commands:
show port xstats all | port_id will show vhost xstats
clear port xstats all | port_id will reset vhost xstats
net/virtio pmd xstats (the function virtio_update_packet_stats) was used
as a reference when implementing the feature.
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in V6:
1. Change xstats from per queue to per port. Keep vhost consistent with
physical NIC i40e, ixgbe, etc.
2. added the release note.
Changes in V5:
For vhost_count_multicast_broadcast, pass struct rte_mbuf *buf instead
of struct rte_mbuf **buf and remove the 3rd parameter, uint16_t count.
Changes in v4:
1. Add a member VHOST_XSTATS_MAX to enum vhost_xstats_pkts, so we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. Restore unicast_packets and update it in the function
vhost_dev_xstats_get.
3. Move the loop out of the function vhost_count_multicast_broadcast in
order to reduce computation.
Changes in v3:
1. Rework vhost_update_packet_xstats and separate it into two parts:
one function handles the generic packet updates, the other increments
the broadcast and multicast counters for packets that failed to be sent,
per RFC 2863 page 42 (ifHCOutMulticastPkts, ifHCOutBroadcastPkts).
2. Define enum vhost_stat_pkts to replace the magic numbers and improve
code readability.
3. Remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
doc/guides/rel_notes/release_16_11.rst | 4 +
drivers/net/vhost/rte_eth_vhost.c | 278 ++++++++++++++++++++++++++++++++-
2 files changed, 277 insertions(+), 5 deletions(-)
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index 66916af..ae90baf 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -36,6 +36,10 @@ New Features
This section is a comment. Make sure to start the actual text at the margin.
+* **Added vhost pmd xstats support.**
+
+ Added vhost pmd extended statistics from per port perspective.
+
Resolved Issues
---------------
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index d99d4ee..d02f531 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,10 +72,30 @@ static struct ether_addr base_eth_addr = {
}
};
+enum vhost_xstats_pkts {
+ VHOST_UNDERSIZE_PKT = 0,
+ VHOST_64_PKT,
+ VHOST_65_TO_127_PKT,
+ VHOST_128_TO_255_PKT,
+ VHOST_256_TO_511_PKT,
+ VHOST_512_TO_1023_PKT,
+ VHOST_1024_TO_1522_PKT,
+ VHOST_1523_TO_MAX_PKT,
+ VHOST_BROADCAST_PKT,
+ VHOST_MULTICAST_PKT,
+ VHOST_UNICAST_PKT,
+ VHOST_ERRORS_PKT,
+ VHOST_ERRORS_FRAGMENTED,
+ VHOST_ERRORS_JABBER,
+ VHOST_UNKNOWN_PROTOCOL,
+ VHOST_XSTATS_MAX,
+};
+
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
+ uint64_t xstats[VHOST_XSTATS_MAX];
};
struct vhost_queue {
@@ -86,11 +106,7 @@ struct vhost_queue {
struct rte_mempool *mb_pool;
uint8_t port;
uint16_t virtqueue_id;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
- uint64_t missed_pkts;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
+ struct vhost_stats stats;
};
struct pmd_internal {
@@ -133,6 +149,244 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+ char name[VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rt]_portX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_portX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
+ sizeof(vhost_rxport_stat_strings[0]))
+
+#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
+ sizeof(vhost_txport_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vq = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit __rte_unused)
+{
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (!xstats_names)
+ return nstats;
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_port%u_%s", dev->data->port_id,
+ vhost_rxport_stat_strings[t].name);
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_port%u_%s", dev->data->port_id,
+ vhost_txport_stat_strings[t].name);
+ count++;
+ }
+ return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+ struct vhost_queue *vq = NULL;
+ unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ + vq->stats.missed_pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_rxport_stat_strings[t].offset);
+ }
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_txport_stat_strings[t].offset);
+ }
+ count++;
+ }
+ return count;
+}
+
+static inline void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+ struct rte_mbuf *mbuf)
+{
+ struct ether_addr *ea = NULL;
+ struct vhost_stats *pstats = &vq->stats;
+
+ ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ pstats->xstats[VHOST_BROADCAST_PKT]++;
+ else
+ pstats->xstats[VHOST_MULTICAST_PKT]++;
+ }
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t count)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = 0; i < count ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ pstats->xstats[VHOST_64_PKT]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ pstats->xstats[index]++;
+ } else {
+ if (pkt_len < 64)
+ pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+ else if (pkt_len <= 1522)
+ pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+ else if (pkt_len > 1522)
+ pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+ }
+ vhost_count_multicast_broadcast(vq, bufs[i]);
+ }
+}
+
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -158,6 +412,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
r->stats.bytes += bufs[i]->pkt_len;
}
+ vhost_update_packet_xstats(r, bufs, nb_rx);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -188,6 +444,15 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_tx); i++)
r->stats.bytes += bufs[i]->pkt_len;
+ vhost_update_packet_xstats(r, bufs, nb_tx);
+
+ /* According to RFC2863 page42 section ifHCOutMulticastPkts and
+ * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
+ * are increased when packets are not transmitted successfully.
+ */
+ for (i = nb_tx; i < nb_bufs; i++)
+ vhost_count_multicast_broadcast(r, bufs[i]);
+
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
out:
@@ -688,6 +953,9 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
};
static int
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
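[Editor's note] For completeness, the generic ethdev calls named in the commit
message can be exercised from an application as well as from test-pmd. The
following is a hedged sketch of that flow, assuming the 16.11-era signatures
(querying with a NULL name array to learn the count, then fetching names and
values); dump_and_reset_xstats is an illustrative helper, not part of the patch.

/* Sketch only: dump and reset all xstats of one port through the generic
 * ethdev API. Assumes rte_eal_init() was called and port_id is valid. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void
dump_and_reset_xstats(uint8_t port_id)
{
        int i, n;
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *values;

        n = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (n <= 0)
                return;

        names = calloc(n, sizeof(*names));
        values = calloc(n, sizeof(*values));
        if (names && values &&
            rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, values, n) == n) {
                for (i = 0; i < n; i++)
                        printf("%s: %" PRIu64 "\n",
                               names[i].name, values[i].value);
                /* same effect as "clear port xstats <port_id>" in test-pmd */
                rte_eth_xstats_reset(port_id);
        }

        free(names);
        free(values);
}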
* [dpdk-dev] [PATCH v7 0/2] net/vhost: add pmd xstats support
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
@ 2016-09-28 13:26 ` Zhiyong Yang
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 " Zhiyong Yang
0 siblings, 2 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-28 13:26 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, ciara.loftus
Patch 1 moves all stats counters to a new defined struct vhost_stats,
in order to manage all stats counters in a consistent way.
Patch 2 adds the pmd xstats support from per port perspective.
Changes in V7:
Patch 2
Removed the "_portX" prepend to the xstat names. Keep vhost xstats name
consistent with physical NIC i40e, ixgbe, etc.
Changes in V6:
Patch 2:
1. Change xstats from per queue to per port. Keep vhost consistent with
physical NIC i40e, ixgbe, etc.
2. Added the release note.
Changes in V5:
Patch 2:
For vhost_count_multicast_broadcast, pass struct rte_mbuf *buf instead
of struct rte_mbuf **buf and remove the 3rd parameter, uint16_t count.
Changes in v4:
Patch 1:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
Before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
Patch 2:
1. Add a member VHOST_XSTATS_MAX to enum vhost_xstats_pkts, so we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. Restore unicast_packets and update it in the function
vhost_dev_xstats_get.
3. Move the loop out of the function vhost_count_multicast_broadcast in
order to reduce computation.
Changes in v3:
1. Rework vhost_update_packet_xstats and separate it into two parts:
one function handles the generic packet updates, the other increments
the broadcast and multicast counters for packets that failed to be sent,
per RFC 2863 page 42 (ifHCOutMulticastPkts, ifHCOutBroadcastPkts).
2. Define enum vhost_stat_pkts to replace the magic numbers and improve
code readability.
3. Remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
---
Zhiyong Yang (2):
net/vhost: add a new defined stats struct
net/vhost: add pmd xstats
doc/guides/rel_notes/release_16_11.rst | 4 +
drivers/net/vhost/rte_eth_vhost.c | 312 ++++++++++++++++++++++++++++++---
2 files changed, 296 insertions(+), 20 deletions(-)
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v7 1/2] net/vhost: add a new defined stats struct
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 0/2] net/vhost: add pmd xstats support Zhiyong Yang
@ 2016-09-28 13:26 ` Zhiyong Yang
2016-09-29 1:55 ` Yuanhan Liu
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 " Zhiyong Yang
1 sibling, 2 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-28 13:26 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, ciara.loftus, Zhiyong Yang
The patch moves all stats counters to a new defined struct vhost_stats
as follows, in order to manage all stats counters in a unified way and
simplify the subsequent function implementation(vhost_dev_xstats_reset).
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v4:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts; the same applies to rx_bytes and tx_bytes.
before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
drivers/net/vhost/rte_eth_vhost.c | 36 +++++++++++++++++++++---------------
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7539cd4..d99d4ee 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,6 +72,12 @@ static struct ether_addr base_eth_addr = {
}
};
+struct vhost_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t missed_pkts;
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -145,11 +151,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
- r->rx_pkts += nb_rx;
+ r->stats.pkts += nb_rx;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- r->rx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
}
out:
@@ -176,11 +182,11 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
- r->tx_pkts += nb_tx;
- r->missed_pkts += nb_bufs - nb_tx;
+ r->stats.pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
for (i = 0; likely(i < nb_tx); i++)
- r->tx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
@@ -582,10 +588,10 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = vq->rx_pkts;
+ stats->q_ipackets[i] = vq->stats.pkts;
rx_total += stats->q_ipackets[i];
- stats->q_ibytes[i] = vq->rx_bytes;
+ stats->q_ibytes[i] = vq->stats.bytes;
rx_total_bytes += stats->q_ibytes[i];
}
@@ -594,11 +600,11 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- stats->q_opackets[i] = vq->tx_pkts;
- tx_missed_total += vq->missed_pkts;
+ stats->q_opackets[i] = vq->stats.pkts;
+ tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
- stats->q_obytes[i] = vq->tx_bytes;
+ stats->q_obytes[i] = vq->stats.bytes;
tx_total_bytes += stats->q_obytes[i];
}
@@ -619,16 +625,16 @@ eth_stats_reset(struct rte_eth_dev *dev)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- vq->rx_pkts = 0;
- vq->rx_bytes = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- vq->tx_pkts = 0;
- vq->tx_bytes = 0;
- vq->missed_pkts = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ vq->stats.missed_pkts = 0;
}
}
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v7 2/2] net/vhost: add pmd xstats
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
@ 2016-09-28 13:26 ` Zhiyong Yang
2016-09-29 8:48 ` Loftus, Ciara
1 sibling, 1 reply; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-28 13:26 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, ciara.loftus, Zhiyong Yang
This feature adds vhost pmd extended statistics from a per-port perspective
in order to meet the requirements of applications such as OVS.
The statistics counters are based on RFC 2819 and RFC 2863 as follows:
rx/tx_good_packets
rx/tx_total_bytes
rx/tx_missed_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_unicast_packets
rx/tx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets;
rx/tx_size_128_to_255_packets;
rx/tx_size_256_to_511_packets;
rx/tx_size_512_to_1023_packets;
rx/tx_size_1024_to_1522_packets;
rx/tx_1523_to_max_packets;
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets;
No API is changed or added.
rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
supported,
rte_eth_xstats_get() to retrieve vhost extended statistics,
rte_eth_xstats_reset() to reset vhost extended statistics.
The usage of vhost pmd xstats is the same as virtio pmd xstats.
For example, when the test-pmd application is running in interactive mode,
vhost pmd xstats supports the two following commands:
show port xstats all | port_id will show vhost xstats
clear port xstats all | port_id will reset vhost xstats
net/virtio pmd xstats (the function virtio_update_packet_stats) was used
as a reference when implementing the feature.
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in V7:
Removed the "_portX" prepend to the xstat names. Keep vhost xstats name
consistent with physical NIC i40e, ixgbe, etc.
Changes in V6:
1. Change xstats from per queue to per port. Keep vhost consistent with
physical NIC i40e, ixgbe, etc.
2. Added the release note.
Changes in V5:
For vhost_count_multicast_broadcast, pass struct rte_mbuf *buf instead
of struct rte_mbuf **buf and remove the 3rd parameter, uint16_t count.
Changes in v4:
1. Add a member VHOST_XSTATS_MAX to enum vhost_xstats_pkts, so we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. Restore unicast_packets and update it in the function
vhost_dev_xstats_get.
3. Move the loop out of the function vhost_count_multicast_broadcast in
order to reduce computation.
Changes in v3:
1. Rework vhost_update_packet_xstats and separate it into two parts:
one function handles the generic packet updates, the other increments
the broadcast and multicast counters for packets that failed to be sent,
per RFC 2863 page 42 (ifHCOutMulticastPkts, ifHCOutBroadcastPkts).
2. Define enum vhost_stat_pkts to replace the magic numbers and improve
code readability.
3. Remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
doc/guides/rel_notes/release_16_11.rst | 4 +
drivers/net/vhost/rte_eth_vhost.c | 276 ++++++++++++++++++++++++++++++++-
2 files changed, 275 insertions(+), 5 deletions(-)
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index 66916af..ae90baf 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -36,6 +36,10 @@ New Features
This section is a comment. Make sure to start the actual text at the margin.
+* **Added vhost pmd xstats support.**
+
+ Added vhost pmd extended statistics from per port perspective.
+
Resolved Issues
---------------
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index d99d4ee..ef7b037 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,10 +72,30 @@ static struct ether_addr base_eth_addr = {
}
};
+enum vhost_xstats_pkts {
+ VHOST_UNDERSIZE_PKT = 0,
+ VHOST_64_PKT,
+ VHOST_65_TO_127_PKT,
+ VHOST_128_TO_255_PKT,
+ VHOST_256_TO_511_PKT,
+ VHOST_512_TO_1023_PKT,
+ VHOST_1024_TO_1522_PKT,
+ VHOST_1523_TO_MAX_PKT,
+ VHOST_BROADCAST_PKT,
+ VHOST_MULTICAST_PKT,
+ VHOST_UNICAST_PKT,
+ VHOST_ERRORS_PKT,
+ VHOST_ERRORS_FRAGMENTED,
+ VHOST_ERRORS_JABBER,
+ VHOST_UNKNOWN_PROTOCOL,
+ VHOST_XSTATS_MAX,
+};
+
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
+ uint64_t xstats[VHOST_XSTATS_MAX];
};
struct vhost_queue {
@@ -86,11 +106,7 @@ struct vhost_queue {
struct rte_mempool *mb_pool;
uint8_t port;
uint16_t virtqueue_id;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
- uint64_t missed_pkts;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
+ struct vhost_stats stats;
};
struct pmd_internal {
@@ -133,6 +149,242 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+ char name[VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rt]_portX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_portX_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
+ sizeof(vhost_rxport_stat_strings[0]))
+
+#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
+ sizeof(vhost_txport_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vq = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit __rte_unused)
+{
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (!xstats_names)
+ return nstats;
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_%s", vhost_rxport_stat_strings[t].name);
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_%s", vhost_txport_stat_strings[t].name);
+ count++;
+ }
+ return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+ struct vhost_queue *vq = NULL;
+ unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ + vq->stats.missed_pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_rxport_stat_strings[t].offset);
+ }
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_txport_stat_strings[t].offset);
+ }
+ count++;
+ }
+ return count;
+}
+
+static inline void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+ struct rte_mbuf *mbuf)
+{
+ struct ether_addr *ea = NULL;
+ struct vhost_stats *pstats = &vq->stats;
+
+ ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ pstats->xstats[VHOST_BROADCAST_PKT]++;
+ else
+ pstats->xstats[VHOST_MULTICAST_PKT]++;
+ }
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t count)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = 0; i < count ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ pstats->xstats[VHOST_64_PKT]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ pstats->xstats[index]++;
+ } else {
+ if (pkt_len < 64)
+ pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+ else if (pkt_len <= 1522)
+ pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+ else if (pkt_len > 1522)
+ pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+ }
+ vhost_count_multicast_broadcast(vq, bufs[i]);
+ }
+}
+
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -158,6 +410,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
r->stats.bytes += bufs[i]->pkt_len;
}
+ vhost_update_packet_xstats(r, bufs, nb_rx);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -188,6 +442,15 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_tx); i++)
r->stats.bytes += bufs[i]->pkt_len;
+ vhost_update_packet_xstats(r, bufs, nb_tx);
+
+ /* According to RFC2863 page42 section ifHCOutMulticastPkts and
+ * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
+ * are increased when packets are not transmitted successfully.
+ */
+ for (i = nb_tx; i < nb_bufs; i++)
+ vhost_count_multicast_broadcast(r, bufs[i]);
+
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
out:
@@ -688,6 +951,9 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
};
static int
--
2.5.5
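
A quick standalone sketch (not part of the patch) of how the size-bucket index in
vhost_update_packet_xstats() above works: for 64 < pkt_len < 1024, the expression
(sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) - 5 maps the highest set bit of
pkt_len straight onto the enum values the patch uses (VHOST_65_TO_127_PKT = 2,
VHOST_128_TO_255_PKT = 3, and so on). The helper and sample values below are
invented purely for illustration and can be compiled on their own with gcc/clang.

    #include <stdint.h>
    #include <stdio.h>

    /* Same ordering as the patch's enum vhost_xstats_pkts:
     * VHOST_UNDERSIZE_PKT = 0, VHOST_64_PKT = 1, VHOST_65_TO_127_PKT = 2, ...
     */
    static unsigned int size_bucket(uint32_t pkt_len)
    {
            /* 32 - clz(pkt_len) is the position of the highest set bit plus one;
             * subtracting 5 lands 65..127 on 2, 128..255 on 3, 256..511 on 4
             * and 512..1023 on 5, matching the enum values above.
             */
            return (sizeof(pkt_len) * 8) - __builtin_clz(pkt_len) - 5;
    }

    int main(void)
    {
            uint32_t samples[] = { 65, 127, 128, 255, 256, 511, 512, 1023 };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("pkt_len %u -> xstats index %u\n",
                           samples[i], size_bucket(samples[i]));
            return 0;       /* prints 2 2 3 3 4 4 5 5 */
    }
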
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v7 1/2] net/vhost: add a new defined stats struct
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
@ 2016-09-29 1:55 ` Yuanhan Liu
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 0/2] net/vhost: add pmd xstats support Zhiyong Yang
1 sibling, 0 replies; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-29 1:55 UTC (permalink / raw)
To: Zhiyong Yang; +Cc: dev, ciara.loftus
On Wed, Sep 28, 2016 at 09:26:47PM +0800, Zhiyong Yang wrote:
> The patch moves all stats counters to a new defined struct vhost_stats
> as follows, in order to manage all stats counters in a unified way and
> simplify the subsequent function implementation(vhost_dev_xstats_reset).
>
> Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
> ---
>
> Changes in v4:
> A queue can be only used as TX or RX, So, we can use pkts instead of
> rx_pkts and tx_pkts, the same to rx_bytes and tx_bytes.
> before modification:
> struct vhost_stats {
> uint64_t rx_pkts;
> uint64_t tx_pkts;
> uint64_t missed_pkts;
> uint64_t rx_bytes;
> uint64_t tx_bytes;
> };
> New struct vhost_stats definition as follows:
> struct vhost_stats {
> uint64_t pkts;
> uint64_t bytes;
> uint64_t missed_pkts;
> };
>
> drivers/net/vhost/rte_eth_vhost.c | 36 +++++++++++++++++++++---------------
> 1 file changed, 21 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
> index 7539cd4..d99d4ee 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -72,6 +72,12 @@ static struct ether_addr base_eth_addr = {
> }
> };
>
> +struct vhost_stats {
> + uint64_t pkts;
> + uint64_t bytes;
> + uint64_t missed_pkts;
> +};
> +
> struct vhost_queue {
> int vid;
> rte_atomic32_t allow_queuing;
> @@ -145,11 +151,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
> nb_rx = rte_vhost_dequeue_burst(r->vid,
> r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
>
> - r->rx_pkts += nb_rx;
> + r->stats.pkts += nb_rx;
My robot caught a build error:
/yeti/vm/ubuntu-initrd-16.04-x86_64-build/dpdk/drivers/net/vhost/rte_eth_vhost.c:154:5:
error: no member named 'stats' in 'struct vhost_queue'
r->stats.pkts += nb_rx;
~ ^
--yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v7 2/2] net/vhost: add pmd xstats
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 " Zhiyong Yang
@ 2016-09-29 8:48 ` Loftus, Ciara
2016-09-29 12:02 ` Yuanhan Liu
0 siblings, 1 reply; 57+ messages in thread
From: Loftus, Ciara @ 2016-09-29 8:48 UTC (permalink / raw)
To: Yang, Zhiyong, dev; +Cc: yuanhan.liu
>
> This feature adds vhost pmd extended statistics from per port perspective
> in order to meet the requirements of the applications such as OVS etc.
>
> The statistics counters are based on RFC 2819 and RFC 2863 as follows:
>
> rx/tx_good_packets
> rx/tx_total_bytes
> rx/tx_missed_pkts
> rx/tx_broadcast_packets
> rx/tx_multicast_packets
> rx/tx_unicast_packets
> rx/tx_undersize_errors
> rx/tx_size_64_packets
> rx/tx_size_65_to_127_packets;
> rx/tx_size_128_to_255_packets;
> rx/tx_size_256_to_511_packets;
> rx/tx_size_512_to_1023_packets;
> rx/tx_size_1024_to_1522_packets;
> rx/tx_1523_to_max_packets;
> rx/tx_errors
> rx_fragmented_errors
> rx_jabber_errors
> rx_unknown_protos_packets;
>
> No API is changed or added.
> rte_eth_xstats_get_names() to retrieve what kinds of vhost xstats are
> supported,
> rte_eth_xstats_get() to retrieve vhost extended statistics,
> rte_eth_xstats_reset() to reset vhost extended statistics.
>
> The usage of vhost pmd xstats is the same as virtio pmd xstats.
> for example, when test-pmd application is running in interactive mode
> vhost pmd xstats will support the two following commands:
>
> show port xstats all | port_id will show vhost xstats
> clear port xstats all | port_id will reset vhost xstats
>
> net/virtio pmd xstats(the function virtio_update_packet_stats) is used
> as reference when implementing the feature.
>
> Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
> ---
>
> Changes in V7:
>
> Removed the "_portX" prepend to the xstat names. Keep vhost xstats name
> consistent with physical NIC i40e, ixgbe, etc.
>
> Changes in V6:
>
> 1. Change xstats from per queue to per port. Keep vhost consistent with
> physical NIC i40e, ixgbe, etc.
> 2. Added the release note.
>
> Changes in V5:
> for vhost_count_multicast_broadcast, passing struct rte_mbuf *buf instead
> of struct rte_mbuf **buf and remove the 3th parameter uint16_t count;.
>
> Changes in v4:
> 1. add a member VHOST_XSTATS_MAX in enum vhost_xstats_pkts, So, we can
> define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
> 2. restore unicast_packets and update it in the function
> vhost_dev_xstats_get
> 3. move the loop out of function vhost_count_multicast_broadcast in order
> to reduce the computation.
>
> Changes in v3:
> 1. rework the vhost_update_packet_xstats and separate it into two parts.
> One function deals with the generic packets update, another one deals
> with increasing the broadcast and multicast with failure packets sent
> according to RFC2863 page42 ifHCOutMulticastPkts ifHCOutBroadcastPkts.
> 2. define enum vhost_stat_pkts to replace the magic numbers and enhance
> the code readability.
> 3. remove some unnecessary type casts and fix one format issue.
>
> Changes in v2:
> 1. remove the compiling switch.
> 2. fix two code bugs.
>
> doc/guides/rel_notes/release_16_11.rst | 4 +
> drivers/net/vhost/rte_eth_vhost.c | 276 ++++++++++++++++++++++++++++++++++-
> 2 files changed, 275 insertions(+), 5 deletions(-)
>
> diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
> index 66916af..ae90baf 100644
> --- a/doc/guides/rel_notes/release_16_11.rst
> +++ b/doc/guides/rel_notes/release_16_11.rst
> @@ -36,6 +36,10 @@ New Features
>
> This section is a comment. Make sure to start the actual text at the
> margin.
>
> +* **Added vhost pmd xstats support.**
> +
> + Added vhost pmd extended statistics from per port perspective.
> +
>
> Resolved Issues
> ---------------
> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
> index d99d4ee..ef7b037 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -72,10 +72,30 @@ static struct ether_addr base_eth_addr = {
> }
> };
>
> +enum vhost_xstats_pkts {
> + VHOST_UNDERSIZE_PKT = 0,
> + VHOST_64_PKT,
> + VHOST_65_TO_127_PKT,
> + VHOST_128_TO_255_PKT,
> + VHOST_256_TO_511_PKT,
> + VHOST_512_TO_1023_PKT,
> + VHOST_1024_TO_1522_PKT,
> + VHOST_1523_TO_MAX_PKT,
> + VHOST_BROADCAST_PKT,
> + VHOST_MULTICAST_PKT,
> + VHOST_UNICAST_PKT,
> + VHOST_ERRORS_PKT,
> + VHOST_ERRORS_FRAGMENTED,
> + VHOST_ERRORS_JABBER,
> + VHOST_UNKNOWN_PROTOCOL,
> + VHOST_XSTATS_MAX,
> +};
> +
> struct vhost_stats {
> uint64_t pkts;
> uint64_t bytes;
> uint64_t missed_pkts;
> + uint64_t xstats[VHOST_XSTATS_MAX];
> };
>
> struct vhost_queue {
> @@ -86,11 +106,7 @@ struct vhost_queue {
> struct rte_mempool *mb_pool;
> uint8_t port;
> uint16_t virtqueue_id;
> - uint64_t rx_pkts;
> - uint64_t tx_pkts;
> - uint64_t missed_pkts;
> - uint64_t rx_bytes;
> - uint64_t tx_bytes;
> + struct vhost_stats stats;
> };
>
> struct pmd_internal {
> @@ -133,6 +149,242 @@ struct rte_vhost_vring_state {
>
> static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
>
> +#define VHOST_XSTATS_NAME_SIZE 64
> +
> +struct vhost_xstats_name_off {
> + char name[VHOST_XSTATS_NAME_SIZE];
> + uint64_t offset;
> +};
> +
> +/* [rt]_portX_ is prepended to the name string here */
> +static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
> + {"good_packets",
> + offsetof(struct vhost_queue, stats.pkts)},
> + {"total_bytes",
> + offsetof(struct vhost_queue, stats.bytes)},
> + {"missed_pkts",
> + offsetof(struct vhost_queue, stats.missed_pkts)},
> + {"broadcast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
> + {"multicast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
> + {"unicast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
> + {"undersize_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
> + {"size_64_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> + {"size_65_to_127_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
> + {"size_128_to_255_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
> + {"size_256_to_511_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
> + {"size_512_to_1023_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
> + {"size_1024_to_1522_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
> + {"size_1523_to_max_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
> + {"errors_with_bad_CRC",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> + {"fragmented_errors",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
> + {"jabber_errors",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
> + {"unknown_protos_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
> +};
> +
> +/* [tx]_portX_ is prepended to the name string here */
> +static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
> + {"good_packets",
> + offsetof(struct vhost_queue, stats.pkts)},
> + {"total_bytes",
> + offsetof(struct vhost_queue, stats.bytes)},
> + {"missed_pkts",
> + offsetof(struct vhost_queue, stats.missed_pkts)},
> + {"broadcast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
> + {"multicast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
> + {"unicast_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
> + {"undersize_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
> + {"size_64_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
> + {"size_65_to_127_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
> + {"size_128_to_255_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
> + {"size_256_to_511_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
> + {"size_512_to_1023_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
> + {"size_1024_to_1522_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
> + {"size_1523_to_max_packets",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
> + {"errors_with_bad_CRC",
> + offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
> +};
> +
> +#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
> + sizeof(vhost_rxport_stat_strings[0]))
> +
> +#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
> + sizeof(vhost_txport_stat_strings[0]))
> +
> +static void
> +vhost_dev_xstats_reset(struct rte_eth_dev *dev)
> +{
> + struct vhost_queue *vq = NULL;
> + unsigned int i = 0;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + vq = dev->data->rx_queues[i];
> + if (!vq)
> + continue;
> + memset(&vq->stats, 0, sizeof(vq->stats));
> + }
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + vq = dev->data->tx_queues[i];
> + if (!vq)
> + continue;
> + memset(&vq->stats, 0, sizeof(vq->stats));
> + }
> +}
> +
> +static int
> +vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
> + struct rte_eth_xstat_name *xstats_names,
> + unsigned int limit __rte_unused)
> +{
> + unsigned int t = 0;
> + int count = 0;
> + int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
> +
> + if (!xstats_names)
> + return nstats;
> + for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
> + snprintf(xstats_names[count].name,
> + sizeof(xstats_names[count].name),
> + "rx_%s", vhost_rxport_stat_strings[t].name);
> + count++;
> + }
> + for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
> + snprintf(xstats_names[count].name,
> + sizeof(xstats_names[count].name),
> + "tx_%s", vhost_txport_stat_strings[t].name);
> + count++;
> + }
> + return count;
> +}
> +
> +static int
> +vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat
> *xstats,
> + unsigned int n)
> +{
> + unsigned int i;
> + unsigned int t;
> + unsigned int count = 0;
> + struct vhost_queue *vq = NULL;
> + unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
> +
> + if (n < nxstats)
> + return nxstats;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + vq = dev->data->rx_queues[i];
> + if (!vq)
> + continue;
> + vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> + - (vq->stats.xstats[VHOST_BROADCAST_PKT]
> + + vq->stats.xstats[VHOST_MULTICAST_PKT]);
> + }
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + vq = dev->data->tx_queues[i];
> + if (!vq)
> + continue;
> + vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> + + vq->stats.missed_pkts
> + - (vq->stats.xstats[VHOST_BROADCAST_PKT]
> + + vq->stats.xstats[VHOST_MULTICAST_PKT]);
> + }
> + for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
> + xstats[count].value = 0;
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + vq = dev->data->rx_queues[i];
> + if (!vq)
> + continue;
> + xstats[count].value +=
> + *(uint64_t *)(((char *)vq)
> + + vhost_rxport_stat_strings[t].offset);
> + }
> + count++;
> + }
> + for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
> + xstats[count].value = 0;
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + vq = dev->data->tx_queues[i];
> + if (!vq)
> + continue;
> + xstats[count].value +=
> + *(uint64_t *)(((char *)vq)
> + + vhost_txport_stat_strings[t].offset);
> + }
> + count++;
> + }
> + return count;
> +}
> +
> +static inline void
> +vhost_count_multicast_broadcast(struct vhost_queue *vq,
> + struct rte_mbuf *mbuf)
> +{
> + struct ether_addr *ea = NULL;
> + struct vhost_stats *pstats = &vq->stats;
> +
> + ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
> + if (is_multicast_ether_addr(ea)) {
> + if (is_broadcast_ether_addr(ea))
> + pstats->xstats[VHOST_BROADCAST_PKT]++;
> + else
> + pstats->xstats[VHOST_MULTICAST_PKT]++;
> + }
> +}
> +
> +static void
> +vhost_update_packet_xstats(struct vhost_queue *vq,
> + struct rte_mbuf **bufs,
> + uint16_t count)
> +{
> + uint32_t pkt_len = 0;
> + uint64_t i = 0;
> + uint64_t index;
> + struct vhost_stats *pstats = &vq->stats;
> +
> + for (i = 0; i < count ; i++) {
> + pkt_len = bufs[i]->pkt_len;
> + if (pkt_len == 64) {
> + pstats->xstats[VHOST_64_PKT]++;
> + } else if (pkt_len > 64 && pkt_len < 1024) {
> + index = (sizeof(pkt_len) * 8)
> + - __builtin_clz(pkt_len) - 5;
> + pstats->xstats[index]++;
> + } else {
> + if (pkt_len < 64)
> + pstats->xstats[VHOST_UNDERSIZE_PKT]++;
> + else if (pkt_len <= 1522)
> + pstats->xstats[VHOST_1024_TO_1522_PKT]++;
> + else if (pkt_len > 1522)
> + pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
> + }
> + vhost_count_multicast_broadcast(vq, bufs[i]);
> + }
> +}
> +
> static uint16_t
> eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
> {
> @@ -158,6 +410,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
> r->stats.bytes += bufs[i]->pkt_len;
> }
>
> + vhost_update_packet_xstats(r, bufs, nb_rx);
> +
> out:
> rte_atomic32_set(&r->while_queuing, 0);
>
> @@ -188,6 +442,15 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
> for (i = 0; likely(i < nb_tx); i++)
> r->stats.bytes += bufs[i]->pkt_len;
>
> + vhost_update_packet_xstats(r, bufs, nb_tx);
> +
> + /* According to RFC2863 page42 section ifHCOutMulticastPkts and
> + * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
> + * are increased when packets are not transmitted successfully.
> + */
> + for (i = nb_tx; i < nb_bufs; i++)
> + vhost_count_multicast_broadcast(r, bufs[i]);
> +
> for (i = 0; likely(i < nb_tx); i++)
> rte_pktmbuf_free(bufs[i]);
> out:
> @@ -688,6 +951,9 @@ static const struct eth_dev_ops ops = {
> .link_update = eth_link_update,
> .stats_get = eth_stats_get,
> .stats_reset = eth_stats_reset,
> + .xstats_reset = vhost_dev_xstats_reset,
> + .xstats_get = vhost_dev_xstats_get,
> + .xstats_get_names = vhost_dev_xstats_get_names,
> };
>
> static int
> --
> 2.5.5
Thanks for the patches Zhiyong. I've tested the size stats and they look good to me.
Tested-by: Ciara Loftus <ciara.loftus@intel.com>
Thanks,
Ciara
^ permalink raw reply [flat|nested] 57+ messages in thread
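
The xstats added by the patch above are consumed through the standard ethdev
xstats calls named in its commit message. For reference, here is a minimal
standalone sketch of the retrieval sequence an application (or testpmd's
"show port xstats" command) goes through. It assumes an already started port,
the 16.07-era prototypes with a uint8_t port_id, and the usual convention that
passing NULL/0 returns the number of counters; dump_vhost_xstats() is an
invented helper, not part of any patch in this thread.

    #include <stdio.h>
    #include <stdlib.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    /* Print every extended statistic exposed by one (vhost) port. */
    static void dump_vhost_xstats(uint8_t port_id)
    {
            struct rte_eth_xstat_name *names = NULL;
            struct rte_eth_xstat *values = NULL;
            int n, i;

            /* First call with NULL asks the PMD how many counters it exposes. */
            n = rte_eth_xstats_get_names(port_id, NULL, 0);
            if (n <= 0)
                    return;

            names = calloc(n, sizeof(*names));
            values = calloc(n, sizeof(*values));
            if (!names || !values)
                    goto out;

            if (rte_eth_xstats_get_names(port_id, names, n) != n ||
                rte_eth_xstats_get(port_id, values, n) != n)
                    goto out;

            for (i = 0; i < n; i++)
                    printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);

    out:
            free(names);
            free(values);
    }

rte_eth_xstats_reset(port_id) would then clear the counters, which is what
"clear port xstats" maps to in testpmd.
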
* Re: [dpdk-dev] [PATCH v7 2/2] net/vhost: add pmd xstats
2016-09-29 8:48 ` Loftus, Ciara
@ 2016-09-29 12:02 ` Yuanhan Liu
2016-09-29 12:22 ` Yang, Zhiyong
0 siblings, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-29 12:02 UTC (permalink / raw)
To: Loftus, Ciara; +Cc: Yang, Zhiyong, dev
On Thu, Sep 29, 2016 at 08:48:40AM +0000, Loftus, Ciara wrote:
> Thanks for the patches Zhiyong. I've tested the size stats and they look good to me.
>
> Tested-by: Ciara Loftus <ciara.loftus@intel.com>
Ciara, thanks for testing.
Zhiyong, please fix the build issue so that I can apply. We have to make
sure every commit is buildable, but not only the patchset.
BTW, it's weird that you didn't have this kind of issue before while you
have it for new versions. Seems like you are doing it (amending your patches)
in a wrong way.
--yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v7 2/2] net/vhost: add pmd xstats
2016-09-29 12:02 ` Yuanhan Liu
@ 2016-09-29 12:22 ` Yang, Zhiyong
0 siblings, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-29 12:22 UTC (permalink / raw)
To: Yuanhan Liu, Loftus, Ciara; +Cc: dev
Hi Yuanhan,
The new version is ready; I will send it later.
I'm thinking about how to avoid this kind of issue
in an appropriate way.
Thanks
Zhiyong
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Thursday, September 29, 2016 8:02 PM
> To: Loftus, Ciara <ciara.loftus@intel.com>
> Cc: Yang, Zhiyong <zhiyong.yang@intel.com>; dev@dpdk.org
> Subject: Re: [PATCH v7 2/2] net/vhost: add pmd xstats
>
> On Thu, Sep 29, 2016 at 08:48:40AM +0000, Loftus, Ciara wrote:
> > Thanks for the patches Zhiyong. I've tested the size stats and they look
> good to me.
> >
> > Tested-by: Ciara Loftus <ciara.loftus@intel.com>
>
> Ciara, thanks for testing.
>
> Zhiyong, please fix the build issue so that I can apply. We have to make sure
> every commit is buildable, but not only the patchset.
>
> BTW, it's weird that you didn't have this kind of issue before while you have it
> for new versions. Seems like you are doing it (amending your patches) in a
> wrong way.
>
> --yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v8 0/2] net/vhost: add pmd xstats support
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-29 1:55 ` Yuanhan Liu
@ 2016-09-29 12:35 ` Zhiyong Yang
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 2/2] net/vhost: add pmd xstats Zhiyong Yang
1 sibling, 2 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-29 12:35 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, ciara.loftus
Patch 1 moves all stats counters to a newly defined struct vhost_stats,
in order to manage all stats counters in a consistent way.
Patch 2 adds pmd xstats support from a per-port perspective.
RX/TX xstats count the bytes without the CRC.
Changes in V8:
Patch 1 fixes the build error.
Changes in V7:
Patch 2
Removed the "_portX" prepend to the xstat names. Keep vhost xstats name
consistent with physical NIC i40e, ixgbe, etc.
Changes in V6:
Patch 2:
1. Change xstats from per queue to per port. Keep vhost consistent with
physical NIC i40e, ixgbe, etc.
2. Added the release note.
Changes in V5:
Patch 2:
For vhost_count_multicast_broadcast, pass struct rte_mbuf *buf instead
of struct rte_mbuf **buf and remove the 3rd parameter uint16_t count.
Changes in v4:
Patch 1:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts, and the same for rx_bytes and tx_bytes.
Before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
Patch 2:
1. add a member VHOST_XSTATS_MAX in enum vhost_xstats_pkts, So, we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. restore unicast_packets and update it in the function
vhost_dev_xstats_get
3. move the loop out of function vhost_count_multicast_broadcast in order
to reduce the computation.
Changes in v3:
1. rework the vhost_update_packet_xstats and separate it into two parts.
One function deals with the generic packets update, another one deals
with increasing the broadcast and multicast with failure packets sent
according to RFC2863 page42 ifHCOutMulticastPkts ifHCOutBroadcastPkts.
2. define enum vhost_stat_pkts to replace the magic numbers and enhance
the code readability.
3. remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
---
Zhiyong Yang (2):
net/vhost: add a new defined stats struct
net/vhost: add pmd xstats
doc/guides/rel_notes/release_16_11.rst | 3 +
drivers/net/vhost/rte_eth_vhost.c | 312 ++++++++++++++++++++++++++++++---
2 files changed, 295 insertions(+), 20 deletions(-)
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v8 1/2] net/vhost: add a new defined stats struct
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 0/2] net/vhost: add pmd xstats support Zhiyong Yang
@ 2016-09-29 12:35 ` Zhiyong Yang
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 2/2] net/vhost: add pmd xstats Zhiyong Yang
1 sibling, 0 replies; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-29 12:35 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, ciara.loftus, Zhiyong Yang
The patch moves all stats counters to a newly defined struct vhost_stats,
in order to manage all stats counters in a unified way and to simplify the
subsequent function implementation (vhost_dev_xstats_reset).
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in v4:
A queue can only be used as TX or RX, so we can use pkts instead of
rx_pkts and tx_pkts, and the same for rx_bytes and tx_bytes.
before modification:
struct vhost_stats {
uint64_t rx_pkts;
uint64_t tx_pkts;
uint64_t missed_pkts;
uint64_t rx_bytes;
uint64_t tx_bytes;
};
New struct vhost_stats definition as follows:
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
};
drivers/net/vhost/rte_eth_vhost.c | 42 ++++++++++++++++++++-------------------
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7539cd4..c4af0f7 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,6 +72,12 @@ static struct ether_addr base_eth_addr = {
}
};
+struct vhost_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t missed_pkts;
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -80,11 +86,7 @@ struct vhost_queue {
struct rte_mempool *mb_pool;
uint8_t port;
uint16_t virtqueue_id;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
- uint64_t missed_pkts;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
+ struct vhost_stats stats;
};
struct pmd_internal {
@@ -145,11 +147,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
- r->rx_pkts += nb_rx;
+ r->stats.pkts += nb_rx;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- r->rx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
}
out:
@@ -176,11 +178,11 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
- r->tx_pkts += nb_tx;
- r->missed_pkts += nb_bufs - nb_tx;
+ r->stats.pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
for (i = 0; likely(i < nb_tx); i++)
- r->tx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
@@ -582,10 +584,10 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = vq->rx_pkts;
+ stats->q_ipackets[i] = vq->stats.pkts;
rx_total += stats->q_ipackets[i];
- stats->q_ibytes[i] = vq->rx_bytes;
+ stats->q_ibytes[i] = vq->stats.bytes;
rx_total_bytes += stats->q_ibytes[i];
}
@@ -594,11 +596,11 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- stats->q_opackets[i] = vq->tx_pkts;
- tx_missed_total += vq->missed_pkts;
+ stats->q_opackets[i] = vq->stats.pkts;
+ tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
- stats->q_obytes[i] = vq->tx_bytes;
+ stats->q_obytes[i] = vq->stats.bytes;
tx_total_bytes += stats->q_obytes[i];
}
@@ -619,16 +621,16 @@ eth_stats_reset(struct rte_eth_dev *dev)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- vq->rx_pkts = 0;
- vq->rx_bytes = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- vq->tx_pkts = 0;
- vq->tx_bytes = 0;
- vq->missed_pkts = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ vq->stats.missed_pkts = 0;
}
}
--
2.5.5
^ permalink raw reply [flat|nested] 57+ messages in thread
* [dpdk-dev] [PATCH v8 2/2] net/vhost: add pmd xstats
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
@ 2016-09-29 12:35 ` Zhiyong Yang
2016-09-29 13:04 ` Yuanhan Liu
1 sibling, 1 reply; 57+ messages in thread
From: Zhiyong Yang @ 2016-09-29 12:35 UTC (permalink / raw)
To: dev; +Cc: yuanhan.liu, ciara.loftus, Zhiyong Yang
This feature adds vhost pmd extended statistics from a per-port perspective
in order to meet the requirements of applications such as OVS.
RX/TX xstats count the bytes without CRC. This is different from physical
NIC stats with CRC.
The statistics counters are based on RFC 2819 and RFC 2863 as follows:
rx/tx_good_packets
rx/tx_total_bytes
rx/tx_missed_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_unicast_packets
rx/tx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets
rx/tx_size_128_to_255_packets
rx/tx_size_256_to_511_packets
rx/tx_size_512_to_1023_packets
rx/tx_size_1024_to_1522_packets
rx/tx_size_1523_to_max_packets
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets
No API is changed or added.
rte_eth_xstats_get_names() retrieves the names of the supported vhost xstats,
rte_eth_xstats_get() retrieves the vhost extended statistics,
rte_eth_xstats_reset() resets the vhost extended statistics.
The usage of vhost pmd xstats is the same as for virtio pmd xstats.
For example, when the test-pmd application is running in interactive mode,
vhost pmd xstats supports the following two commands:
show port xstats all | port_id will show vhost xstats
clear port xstats all | port_id will reset vhost xstats
net/virtio pmd xstats (the function virtio_update_packet_stats) was used
as a reference when implementing the feature.
Thanks for the patches Zhiyong. I've tested the size stats and they look
good to me.
Tested-by: Ciara Loftus <ciara.loftus@intel.com>
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
Changes in V7:
Removed the "_portX" prepended to the xstat names. Keep vhost xstats name
consistent with physical NIC i40e, ixgbe, etc.
Changes in V6:
1. Change xstats from per queue to per port. Keep vhost consistent with
physical NIC i40e, ixgbe, etc.
2. Added the release note.
Changes in V5:
For vhost_count_multicast_broadcast, pass struct rte_mbuf *buf instead
of struct rte_mbuf **buf and remove the 3rd parameter uint16_t count.
Changes in v4:
1. add a member VHOST_XSTATS_MAX in enum vhost_xstats_pkts, So, we can
define uint64_t xstats[VHOST_XSTATS_MAX]; instead of xstats[16].
2. restore unicast_packets and update it in the function
vhost_dev_xstats_get
3. move the loop out of function vhost_count_multicast_broadcast in order
to reduce the computation.
Changes in v3:
1. rework the vhost_update_packet_xstats and separate it into two parts.
One function deals with the generic packets update, another one deals
with increasing the broadcast and multicast with failure packets sent
according to RFC2863 page42 ifHCOutMulticastPkts ifHCOutBroadcastPkts.
2. define enum vhost_stat_pkts to replace the magic numbers and enhance
the code readability.
3. remove some unnecessary type casts and fix one format issue.
Changes in v2:
1. remove the compiling switch.
2. fix two code bugs.
doc/guides/rel_notes/release_16_11.rst | 3 +
drivers/net/vhost/rte_eth_vhost.c | 270 +++++++++++++++++++++++++++++++++
2 files changed, 273 insertions(+)
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index 1e37e48..5fc93e3 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -50,6 +50,9 @@ New Features
* **Added virtio NEON support for ARM.**
+ * **Added vhost pmd xstats support.**
+
+ Added vhost pmd extended statistics from per port perspective.
Resolved Issues
---------------
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index c4af0f7..6943ac9 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -72,10 +72,30 @@ static struct ether_addr base_eth_addr = {
}
};
+enum vhost_xstats_pkts {
+ VHOST_UNDERSIZE_PKT = 0,
+ VHOST_64_PKT,
+ VHOST_65_TO_127_PKT,
+ VHOST_128_TO_255_PKT,
+ VHOST_256_TO_511_PKT,
+ VHOST_512_TO_1023_PKT,
+ VHOST_1024_TO_1522_PKT,
+ VHOST_1523_TO_MAX_PKT,
+ VHOST_BROADCAST_PKT,
+ VHOST_MULTICAST_PKT,
+ VHOST_UNICAST_PKT,
+ VHOST_ERRORS_PKT,
+ VHOST_ERRORS_FRAGMENTED,
+ VHOST_ERRORS_JABBER,
+ VHOST_UNKNOWN_PROTOCOL,
+ VHOST_XSTATS_MAX,
+};
+
struct vhost_stats {
uint64_t pkts;
uint64_t bytes;
uint64_t missed_pkts;
+ uint64_t xstats[VHOST_XSTATS_MAX];
};
struct vhost_queue {
@@ -129,6 +149,242 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+ char name[VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rx]_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
+ sizeof(vhost_rxport_stat_strings[0]))
+
+#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
+ sizeof(vhost_txport_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vq = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit __rte_unused)
+{
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (!xstats_names)
+ return nstats;
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_%s", vhost_rxport_stat_strings[t].name);
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_%s", vhost_txport_stat_strings[t].name);
+ count++;
+ }
+ return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+ struct vhost_queue *vq = NULL;
+ unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ + vq->stats.missed_pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_rxport_stat_strings[t].offset);
+ }
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_txport_stat_strings[t].offset);
+ }
+ count++;
+ }
+ return count;
+}
+
+static inline void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+ struct rte_mbuf *mbuf)
+{
+ struct ether_addr *ea = NULL;
+ struct vhost_stats *pstats = &vq->stats;
+
+ ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ pstats->xstats[VHOST_BROADCAST_PKT]++;
+ else
+ pstats->xstats[VHOST_MULTICAST_PKT]++;
+ }
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t count)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = 0; i < count ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ pstats->xstats[VHOST_64_PKT]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ pstats->xstats[index]++;
+ } else {
+ if (pkt_len < 64)
+ pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+ else if (pkt_len <= 1522)
+ pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+ else if (pkt_len > 1522)
+ pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+ }
+ vhost_count_multicast_broadcast(vq, bufs[i]);
+ }
+}
+
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -154,6 +410,8 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
r->stats.bytes += bufs[i]->pkt_len;
}
+ vhost_update_packet_xstats(r, bufs, nb_rx);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -184,6 +442,15 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_tx); i++)
r->stats.bytes += bufs[i]->pkt_len;
+ vhost_update_packet_xstats(r, bufs, nb_tx);
+
+ /* According to RFC2863 page42 section ifHCOutMulticastPkts and
+ * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
+ * are increased when packets are not transmitted successfully.
+ */
+ for (i = nb_tx; i < nb_bufs; i++)
+ vhost_count_multicast_broadcast(r, bufs[i]);
+
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
out:
@@ -684,6 +951,9 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
};
static int
--
2.5.5
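
As a side note on the implementation above: the name/offset table plus pointer
arithmetic is what lets vhost_dev_xstats_get() aggregate any per-queue counter
into a per-port value with one generic loop. Below is a stripped-down,
self-contained sketch of the same pattern; the demo_* names are invented for
illustration and are not the driver's real structures.

    #include <stddef.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    struct demo_stats { uint64_t pkts; uint64_t bytes; };
    struct demo_queue { struct demo_stats stats; };

    struct demo_xstat_name_off {
            const char *name;
            size_t offset;  /* byte offset of the counter inside demo_queue */
    };

    static const struct demo_xstat_name_off demo_strings[] = {
            { "good_packets", offsetof(struct demo_queue, stats.pkts) },
            { "total_bytes",  offsetof(struct demo_queue, stats.bytes) },
    };

    int main(void)
    {
            struct demo_queue q0 = { { 10, 640 } }, q1 = { { 5, 320 } };
            struct demo_queue *queues[] = { &q0, &q1 };
            size_t t, i;

            /* One loop covers every counter: read it at its recorded offset
             * in each queue and sum across queues, as the PMD does per port.
             */
            for (t = 0; t < sizeof(demo_strings) / sizeof(demo_strings[0]); t++) {
                    uint64_t value = 0;

                    for (i = 0; i < 2; i++)
                            value += *(uint64_t *)((char *)queues[i] +
                                                   demo_strings[t].offset);
                    printf("rx_%s = %" PRIu64 "\n", demo_strings[t].name, value);
            }
            return 0;       /* rx_good_packets = 15, rx_total_bytes = 960 */
    }

The one counter that does not fit this pattern in the real driver is unicast,
which is not maintained on the hot path but derived at read time as pkts
(plus missed_pkts on TX) minus the broadcast and multicast counts.
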
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v8 2/2] net/vhost: add pmd xstats
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 2/2] net/vhost: add pmd xstats Zhiyong Yang
@ 2016-09-29 13:04 ` Yuanhan Liu
2016-09-29 13:50 ` Yang, Zhiyong
0 siblings, 1 reply; 57+ messages in thread
From: Yuanhan Liu @ 2016-09-29 13:04 UTC (permalink / raw)
To: Zhiyong Yang; +Cc: dev, ciara.loftus
On Thu, Sep 29, 2016 at 08:35:49PM +0800, Zhiyong Yang wrote:
> This feature adds vhost pmd extended statistics from per port perspective
> in order to meet the requirements of the applications such as OVS etc.
> RX/TX xstats count the bytes without CRC. This is different from physical
> NIC stats with CRC.
...
> net/virtio pmd xstats(the function virtio_update_packet_stats) is used
> as reference when implementing the feature.
>
> Thanks for the patches Zhiyong. I've tested the size stats and they look
> good to me.
The above paragraph should not be in the commit log.
> Tested-by: Ciara Loftus <ciara.loftus@intel.com>
>
> Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
There should be no whitespace line between the Tested-by and your SoB.
Fixed, and series applied to dpdk-next-virtio.
--yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
* Re: [dpdk-dev] [PATCH v8 2/2] net/vhost: add pmd xstats
2016-09-29 13:04 ` Yuanhan Liu
@ 2016-09-29 13:50 ` Yang, Zhiyong
0 siblings, 0 replies; 57+ messages in thread
From: Yang, Zhiyong @ 2016-09-29 13:50 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: dev, Loftus, Ciara
Thanks very much, yuanhan.
> -----Original Message-----
> From: Yuanhan Liu [mailto:yuanhan.liu@linux.intel.com]
> Sent: Thursday, September 29, 2016 9:05 PM
> To: Yang, Zhiyong <zhiyong.yang@intel.com>
> Cc: dev@dpdk.org; Loftus, Ciara <ciara.loftus@intel.com>
> Subject: Re: [PATCH v8 2/2] net/vhost: add pmd xstats
>
> On Thu, Sep 29, 2016 at 08:35:49PM +0800, Zhiyong Yang wrote:
> > This feature adds vhost pmd extended statistics from per port
> > perspective in order to meet the requirements of the applications such as
> OVS etc.
> > RX/TX xstats count the bytes without CRC. This is different from
> > physical NIC stats with CRC.
> ...
> > net/virtio pmd xstats(the function virtio_update_packet_stats) is used
> > as reference when implementing the feature.
> >
> > Thanks for the patches Zhiyong. I've tested the size stats and they
> > look good to me.
>
> The above paragraph should not be in the commit log.
>
> > Tested-by: Ciara Loftus <ciara.loftus@intel.com>
> >
> > Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
>
> There should be no whitespace line bewteen the Tested-by and your SoB.
>
> Fixed, and series applied to dpdk-next-virtio.
>
> --yliu
^ permalink raw reply [flat|nested] 57+ messages in thread
end of thread
Thread overview: 57+ messages
2016-08-19 12:16 [dpdk-dev] [PATCH] vhost: add pmd xstats Zhiyong Yang
2016-08-22 7:52 ` Panu Matilainen
2016-08-23 8:04 ` Yang, Zhiyong
2016-08-23 9:45 ` Panu Matilainen
2016-08-24 5:46 ` Yuanhan Liu
2016-08-24 8:44 ` Thomas Monjalon
2016-08-24 12:37 ` Panu Matilainen
2016-08-25 9:21 ` Yang, Zhiyong
2016-08-30 2:45 ` Yao, Lei A
2016-08-30 3:03 ` Xu, Qian Q
2016-08-30 3:21 ` Yao, Lei A
2016-08-31 7:18 ` Yang, Zhiyong
2016-09-01 6:37 ` Yang, Zhiyong
2016-09-09 8:15 ` [dpdk-dev] [PATCH v2] net/vhost: " Zhiyong Yang
2016-09-09 8:40 ` Van Haaren, Harry
2016-09-09 8:54 ` Yang, Zhiyong
2016-09-14 6:20 ` Yuanhan Liu
2016-09-14 7:43 ` Yang, Zhiyong
2016-09-18 13:16 ` Yuanhan Liu
2016-09-19 2:48 ` Yang, Zhiyong
2016-09-14 8:30 ` Yang, Zhiyong
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: add a new stats struct Zhiyong Yang
2016-09-20 10:44 ` Yuanhan Liu
2016-09-21 5:12 ` Yang, Zhiyong
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-21 10:05 ` Zhiyong Yang
2016-09-21 10:05 ` [dpdk-dev] [PATCH v4 2/2] net/vhost: add pmd xstats Zhiyong Yang
2016-09-21 10:57 ` Yuanhan Liu
2016-09-22 1:42 ` Yang, Zhiyong
2016-09-22 2:09 ` Yuanhan Liu
2016-09-21 10:13 ` [dpdk-dev] [PATCH v4 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-29 1:55 ` Yuanhan Liu
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 0/2] net/vhost: add pmd xstats support Zhiyong Yang
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 1/2] net/vhost: add a new defined stats struct Zhiyong Yang
2016-09-29 12:35 ` [dpdk-dev] [PATCH v8 2/2] net/vhost: add pmd xstats Zhiyong Yang
2016-09-29 13:04 ` Yuanhan Liu
2016-09-29 13:50 ` Yang, Zhiyong
2016-09-28 13:26 ` [dpdk-dev] [PATCH v7 " Zhiyong Yang
2016-09-29 8:48 ` Loftus, Ciara
2016-09-29 12:02 ` Yuanhan Liu
2016-09-29 12:22 ` Yang, Zhiyong
2016-09-28 8:33 ` [dpdk-dev] [PATCH v6 " Zhiyong Yang
2016-09-22 8:19 ` [dpdk-dev] [PATCH v5 " Zhiyong Yang
2016-09-23 3:56 ` [dpdk-dev] [PATCH v5 0/2] net/vhost: add pmd xstats support Yuanhan Liu
2016-09-28 2:35 ` Yuanhan Liu
2016-09-20 9:36 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: add pmd xstats Zhiyong Yang
2016-09-20 10:56 ` Yuanhan Liu
2016-09-21 7:22 ` Yang, Zhiyong
2016-09-20 11:50 ` Yuanhan Liu
2016-09-21 6:15 ` Yang, Zhiyong