* [PATCH 1/2] net/gve: change offloading capability
@ 2023-02-16 18:58 Levend Sayar
2023-02-16 18:58 ` [PATCH 2/2] net/gve: add extended statistics Levend Sayar
` (2 more replies)
0 siblings, 3 replies; 11+ messages in thread
From: Levend Sayar @ 2023-02-16 18:58 UTC (permalink / raw)
To: junfeng.guo; +Cc: dev, Levend Sayar
Google Virtual NIC does not perform IPv4 checksumming.
Remove that capability from the PMD.
Signed-off-by: Levend Sayar <levendsayar@gmail.com>
---
drivers/net/gve/gve_ethdev.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 06d1b796c8..fef2458a16 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -282,7 +282,6 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_offload_capa = 0;
dev_info->tx_offload_capa =
RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
- RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
--
2.37.1 (Apple Git-137.1)
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH 2/2] net/gve: add extended statistics
2023-02-16 18:58 [PATCH 1/2] net/gve: change offloading capability Levend Sayar
@ 2023-02-16 18:58 ` Levend Sayar
2023-02-17 12:34 ` Ferruh Yigit
2023-02-16 20:14 ` [PATCH 1/2] net/gve: change offloading capability Rushil Gupta
2023-02-17 9:11 ` Guo, Junfeng
2 siblings, 1 reply; 11+ messages in thread
From: Levend Sayar @ 2023-02-16 18:58 UTC (permalink / raw)
To: junfeng.guo; +Cc: dev, Levend Sayar
Google Virtual NIC PMD is enriched with extended statistics info.
eth_dev_ops callback names are also synced with the eth_dev_ops field names.
Signed-off-by: Levend Sayar <levendsayar@gmail.com>
---
drivers/net/gve/gve_ethdev.c | 152 ++++++++++++++++++++++++++++++-----
drivers/net/gve/gve_rx.c | 8 +-
2 files changed, 138 insertions(+), 22 deletions(-)
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index fef2458a16..e31fdce960 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -266,7 +266,7 @@ gve_dev_close(struct rte_eth_dev *dev)
}
static int
-gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+gve_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct gve_priv *priv = dev->data->dev_private;
@@ -319,15 +319,12 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static int
-gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+gve_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
uint16_t i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct gve_tx_queue *txq = dev->data->tx_queues[i];
- if (txq == NULL)
- continue;
-
stats->opackets += txq->packets;
stats->obytes += txq->bytes;
stats->oerrors += txq->errors;
@@ -335,9 +332,6 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct gve_rx_queue *rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
- continue;
-
stats->ipackets += rxq->packets;
stats->ibytes += rxq->bytes;
stats->ierrors += rxq->errors;
@@ -348,15 +342,12 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
static int
-gve_dev_stats_reset(struct rte_eth_dev *dev)
+gve_stats_reset(struct rte_eth_dev *dev)
{
uint16_t i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct gve_tx_queue *txq = dev->data->tx_queues[i];
- if (txq == NULL)
- continue;
-
txq->packets = 0;
txq->bytes = 0;
txq->errors = 0;
@@ -364,9 +355,6 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct gve_rx_queue *rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
- continue;
-
rxq->packets = 0;
rxq->bytes = 0;
rxq->errors = 0;
@@ -377,7 +365,7 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
}
static int
-gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+gve_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct gve_priv *priv = dev->data->dev_private;
int err;
@@ -403,20 +391,144 @@ gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
+static int
+gve_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n)
+{
+ if (xstats) {
+ uint requested = n;
+ uint64_t indx = 0;
+ struct rte_eth_xstat *xstat = xstats;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+ xstat->id = indx++;
+ xstat->value = rxq->packets;
+ if (--requested == 0)
+ return n;
+ xstat++;
+
+ xstat->id = indx++;
+ xstat->value = rxq->bytes;
+ if (--requested == 0)
+ return n;
+ xstat++;
+
+ xstat->id = indx++;
+ xstat->value = rxq->errors;
+ if (--requested == 0)
+ return n;
+ xstat++;
+
+ xstat->id = indx++;
+ xstat->value = rxq->no_mbufs;
+ if (--requested == 0)
+ return n;
+ xstat++;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct gve_tx_queue *txq = dev->data->tx_queues[i];
+ xstat->id = indx++;
+ xstat->value = txq->packets;
+ if (--requested == 0)
+ return n;
+ xstat++;
+
+ xstat->id = indx++;
+ xstat->value = txq->bytes;
+ if (--requested == 0)
+ return n;
+ xstat++;
+
+ xstat->id = indx++;
+ xstat->value = txq->errors;
+ if (--requested == 0)
+ return n;
+ xstat++;
+ }
+ }
+
+ return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
+}
+
+static int
+gve_xstats_reset(struct rte_eth_dev *dev)
+{
+ return gve_stats_reset(dev);
+}
+
+static int
+gve_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names,
+ unsigned int n)
+{
+ if (xstats_names) {
+ uint requested = n;
+ struct rte_eth_xstat_name *xstats_name = xstats_names;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ snprintf(xstats_name->name, sizeof(xstats_name->name),
+ "rx_q%d_packets", i);
+ if (--requested == 0)
+ return n;
+ xstats_name++;
+ snprintf(xstats_name->name, sizeof(xstats_name->name),
+ "rx_q%d_bytes", i);
+ if (--requested == 0)
+ return n;
+ xstats_name++;
+ snprintf(xstats_name->name, sizeof(xstats_name->name),
+ "rx_q%d_errors", i);
+ if (--requested == 0)
+ return n;
+ xstats_name++;
+ snprintf(xstats_name->name, sizeof(xstats_name->name),
+ "rx_q%d_no_mbufs", i);
+ if (--requested == 0)
+ return n;
+ xstats_name++;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ snprintf(xstats_name->name, sizeof(xstats_name->name),
+ "tx_q%d_packets", i);
+ if (--requested == 0)
+ return n;
+ xstats_name++;
+ snprintf(xstats_name->name, sizeof(xstats_name->name),
+ "tx_q%d_bytes", i);
+ if (--requested == 0)
+ return n;
+ xstats_name++;
+ snprintf(xstats_name->name, sizeof(xstats_name->name),
+ "tx_q%d_errors", i);
+ if (--requested == 0)
+ return n;
+ xstats_name++;
+ }
+ }
+
+ return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
+}
+
static const struct eth_dev_ops gve_eth_dev_ops = {
.dev_configure = gve_dev_configure,
.dev_start = gve_dev_start,
.dev_stop = gve_dev_stop,
.dev_close = gve_dev_close,
- .dev_infos_get = gve_dev_info_get,
+ .dev_infos_get = gve_dev_infos_get,
.rx_queue_setup = gve_rx_queue_setup,
.tx_queue_setup = gve_tx_queue_setup,
.rx_queue_release = gve_rx_queue_release,
.tx_queue_release = gve_tx_queue_release,
.link_update = gve_link_update,
- .stats_get = gve_dev_stats_get,
- .stats_reset = gve_dev_stats_reset,
- .mtu_set = gve_dev_mtu_set,
+ .stats_get = gve_stats_get,
+ .stats_reset = gve_stats_reset,
+ .mtu_set = gve_mtu_set,
+ .xstats_get = gve_xstats_get,
+ .xstats_reset = gve_xstats_reset,
+ .xstats_get_names = gve_xstats_get_names,
};
static void
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 66fbcf3930..7687977003 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -22,8 +22,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
if (diag < 0) {
for (i = 0; i < nb_alloc; i++) {
nmb = rte_pktmbuf_alloc(rxq->mpool);
- if (!nmb)
+ if (!nmb) {
+ rxq->no_mbufs++;
break;
+ }
rxq->sw_ring[idx + i] = nmb;
}
if (i != nb_alloc) {
@@ -57,8 +59,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
if (diag < 0) {
for (i = 0; i < nb_alloc; i++) {
nmb = rte_pktmbuf_alloc(rxq->mpool);
- if (!nmb)
+ if (!nmb) {
+ rxq->no_mbufs++;
break;
+ }
rxq->sw_ring[idx + i] = nmb;
}
nb_alloc = i;
--
2.37.1 (Apple Git-137.1)
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] net/gve: change offloading capability
2023-02-16 18:58 [PATCH 1/2] net/gve: change offloading capability Levend Sayar
2023-02-16 18:58 ` [PATCH 2/2] net/gve: add extended statistics Levend Sayar
@ 2023-02-16 20:14 ` Rushil Gupta
2023-02-17 9:07 ` Levend Sayar
2023-02-17 9:11 ` Guo, Junfeng
2 siblings, 1 reply; 11+ messages in thread
From: Rushil Gupta @ 2023-02-16 20:14 UTC (permalink / raw)
To: Levend Sayar; +Cc: junfeng.guo, dev
[-- Attachment #1: Type: text/plain, Size: 1066 bytes --]
Makes sense. The virtual device only does L4 checksum.
Acked-by: Rushil Gupta <rushilg@google.com>
On Thu, Feb 16, 2023 at 10:58 AM Levend Sayar <levendsayar@gmail.com> wrote:
> Google Virtual NIC does not perform IPv4 checksumming.
> Remove that capability from the PMD.
>
> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
> ---
> drivers/net/gve/gve_ethdev.c | 1 -
> 1 file changed, 1 deletion(-)
>
> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
> index 06d1b796c8..fef2458a16 100644
> --- a/drivers/net/gve/gve_ethdev.c
> +++ b/drivers/net/gve/gve_ethdev.c
> @@ -282,7 +282,6 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
> dev_info->rx_offload_capa = 0;
> dev_info->tx_offload_capa =
> RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
> - RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
> RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
> RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
> RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
> --
> 2.37.1 (Apple Git-137.1)
>
>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] net/gve: change offloading capability
2023-02-16 20:14 ` [PATCH 1/2] net/gve: change offloading capability Rushil Gupta
@ 2023-02-17 9:07 ` Levend Sayar
0 siblings, 0 replies; 11+ messages in thread
From: Levend Sayar @ 2023-02-17 9:07 UTC (permalink / raw)
To: Rushil Gupta; +Cc: Junfeng Guo, dev
[-- Attachment #1: Type: text/plain, Size: 1347 bytes --]
Thanks Rushil for acknowledging.
Best,
Levend
> On 16 Feb 2023, at 23:14, Rushil Gupta <rushilg@google.com> wrote:
>
> Makes sense. The virtual device only does L4 checksum.
>
> Acked-by: Rushil Gupta <rushilg@google.com>
>
> On Thu, Feb 16, 2023 at 10:58 AM Levend Sayar <levendsayar@gmail.com> wrote:
>> Google Virtual NIC does not perform IPv4 checksumming.
>> Remove that capability from the PMD.
>>
>> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
>> ---
>> drivers/net/gve/gve_ethdev.c | 1 -
>> 1 file changed, 1 deletion(-)
>>
>> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
>> index 06d1b796c8..fef2458a16 100644
>> --- a/drivers/net/gve/gve_ethdev.c
>> +++ b/drivers/net/gve/gve_ethdev.c
>> @@ -282,7 +282,6 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>> dev_info->rx_offload_capa = 0;
>> dev_info->tx_offload_capa =
>> RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
>> - RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
>> RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
>> RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
>> RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
>> --
>> 2.37.1 (Apple Git-137.1)
>>
^ permalink raw reply [flat|nested] 11+ messages in thread
* RE: [PATCH 1/2] net/gve: change offloading capability
2023-02-16 18:58 [PATCH 1/2] net/gve: change offloading capability Levend Sayar
2023-02-16 18:58 ` [PATCH 2/2] net/gve: add extended statistics Levend Sayar
2023-02-16 20:14 ` [PATCH 1/2] net/gve: change offloading capability Rushil Gupta
@ 2023-02-17 9:11 ` Guo, Junfeng
2023-02-17 9:15 ` Levend Sayar
2023-02-20 15:43 ` Ferruh Yigit
2 siblings, 2 replies; 11+ messages in thread
From: Guo, Junfeng @ 2023-02-17 9:11 UTC (permalink / raw)
To: Levend Sayar; +Cc: dev
> -----Original Message-----
> From: Levend Sayar <levendsayar@gmail.com>
> Sent: Friday, February 17, 2023 02:58
> To: Guo, Junfeng <junfeng.guo@intel.com>
> Cc: dev@dpdk.org; Levend Sayar <levendsayar@gmail.com>
> Subject: [PATCH 1/2] net/gve: change offloading capability
>
> Google Virtual NIC does not perform IPv4 checksumming.
> Remove that capability from the PMD.
>
> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
> ---
Acked-by: Junfeng Guo <junfeng.guo@intel.com>
Regards,
Junfeng Guo
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] net/gve: change offloading capability
2023-02-17 9:11 ` Guo, Junfeng
@ 2023-02-17 9:15 ` Levend Sayar
2023-02-20 15:43 ` Ferruh Yigit
1 sibling, 0 replies; 11+ messages in thread
From: Levend Sayar @ 2023-02-17 9:15 UTC (permalink / raw)
To: Guo, Junfeng; +Cc: dev
[-- Attachment #1: Type: text/plain, Size: 710 bytes --]
Thanks Junfeng for acknowledging.
Best,
Levend
> On 17 Feb 2023, at 12:11, Guo, Junfeng <junfeng.guo@intel.com> wrote:
>
>
>
>> -----Original Message-----
>> From: Levend Sayar <levendsayar@gmail.com>
>> Sent: Friday, February 17, 2023 02:58
>> To: Guo, Junfeng <junfeng.guo@intel.com>
>> Cc: dev@dpdk.org; Levend Sayar <levendsayar@gmail.com>
>> Subject: [PATCH 1/2] net/gve: change offloading capability
>>
>> Google Virtual NIC does not perform IPv4 checksumming.
>> Remove that capability from the PMD.
>>
>> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
>> ---
>
> Acked-by: Junfeng Guo <junfeng.guo@intel.com>
>
> Regards,
> Junfeng Guo
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/2] net/gve: add extended statistics
2023-02-16 18:58 ` [PATCH 2/2] net/gve: add extended statistics Levend Sayar
@ 2023-02-17 12:34 ` Ferruh Yigit
2023-02-19 0:26 ` Levend Sayar
0 siblings, 1 reply; 11+ messages in thread
From: Ferruh Yigit @ 2023-02-17 12:34 UTC (permalink / raw)
To: Levend Sayar, junfeng.guo; +Cc: dev
On 2/16/2023 6:58 PM, Levend Sayar wrote:
> Google Virtual NIC PMD is enriched with extended statistics info.
Only queue stats are added to xstats, can you please highlight this in the
commit log?
> eth_dev_ops callback names are also synced with the eth_dev_ops field names.
>
Renaming eth_dev_ops is not related to xstats, and I am not sure if the
change is necessary, can you please drop it from this patch?
> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
> ---
> drivers/net/gve/gve_ethdev.c | 152 ++++++++++++++++++++++++++++++-----
> drivers/net/gve/gve_rx.c | 8 +-
> 2 files changed, 138 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
> index fef2458a16..e31fdce960 100644
> --- a/drivers/net/gve/gve_ethdev.c
> +++ b/drivers/net/gve/gve_ethdev.c
> @@ -266,7 +266,7 @@ gve_dev_close(struct rte_eth_dev *dev)
> }
>
> static int
> -gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> +gve_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> {
> struct gve_priv *priv = dev->data->dev_private;
>
> @@ -319,15 +319,12 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> }
>
> static int
> -gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
> +gve_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
> {
> uint16_t i;
>
> for (i = 0; i < dev->data->nb_tx_queues; i++) {
> struct gve_tx_queue *txq = dev->data->tx_queues[i];
> - if (txq == NULL)
> - continue;
> -
I assume the check is removed because it is unnecessary, but again it is not
directly related to the patch; can you also drop these from the patch
to reduce the noise?
> stats->opackets += txq->packets;
> stats->obytes += txq->bytes;
> stats->oerrors += txq->errors;
> @@ -335,9 +332,6 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
> - if (rxq == NULL)
> - continue;
> -
> stats->ipackets += rxq->packets;
> stats->ibytes += rxq->bytes;
> stats->ierrors += rxq->errors;
> @@ -348,15 +342,12 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
> }
>
> static int
> -gve_dev_stats_reset(struct rte_eth_dev *dev)
> +gve_stats_reset(struct rte_eth_dev *dev)
> {
> uint16_t i;
>
> for (i = 0; i < dev->data->nb_tx_queues; i++) {
> struct gve_tx_queue *txq = dev->data->tx_queues[i];
> - if (txq == NULL)
> - continue;
> -
> txq->packets = 0;
> txq->bytes = 0;
> txq->errors = 0;
> @@ -364,9 +355,6 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
>
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
> - if (rxq == NULL)
> - continue;
> -
> rxq->packets = 0;
> rxq->bytes = 0;
> rxq->errors = 0;
> @@ -377,7 +365,7 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
> }
>
> static int
> -gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> +gve_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> {
> struct gve_priv *priv = dev->data->dev_private;
> int err;
> @@ -403,20 +391,144 @@ gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> return 0;
> }
>
> +static int
> +gve_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n)
> +{
> + if (xstats) {
To reduce indentation (and increase readability) you can prefer:
``
if !xstats
return count;
<put rest of logic here>
``
> + uint requested = n;
uint? C#? Please prefer the standard "unsigned int" type.
> + uint64_t indx = 0;
> + struct rte_eth_xstat *xstat = xstats;
> + uint16_t i;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + struct gve_rx_queue *rxq = dev->data->rx_queues[i];
> + xstat->id = indx++;
> + xstat->value = rxq->packets;
> + if (--requested == 0)
> + return n;
And in this case, the ethdev layer does the checks and returns accordingly,
so you need to try to fill the stats as much as possible; can you please
double check the ethdev API?
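(A minimal sketch, added here and not part of the original mail, of the driver-side contract many PMDs follow, assuming a hypothetical gve_xstats_count() helper: when the caller's array is NULL or too small, return the total number of stats so the ethdev layer can report the required size; otherwise fill the array and return the number of entries written.)

``
static int
gve_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int n)
{
	/* hypothetical helper returning the total number of xstats */
	unsigned int count = gve_xstats_count(dev);
	unsigned int i;

	if (xstats == NULL || n < count)
		return count;

	for (i = 0; i < count; i++) {
		xstats[i].id = i;
		/* hypothetical helper reading the i-th counter */
		xstats[i].value = gve_xstat_value(dev, i);
	}

	return count;
}
``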
> + xstat++;
> +
> + xstat->id = indx++;
> + xstat->value = rxq->bytes;
> + if (--requested == 0)
> + return n;
> + xstat++;
> +
> + xstat->id = indx++;
> + xstat->value = rxq->errors;
> + if (--requested == 0)
> + return n;
> + xstat++;
> +
> + xstat->id = indx++;
> + xstat->value = rxq->no_mbufs;
> + if (--requested == 0)
> + return n;
> + xstat++;
> + }
> +
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + struct gve_tx_queue *txq = dev->data->tx_queues[i];
> + xstat->id = indx++;
> + xstat->value = txq->packets;
> + if (--requested == 0)
> + return n;
> + xstat++;
> +
> + xstat->id = indx++;
> + xstat->value = txq->bytes;
> + if (--requested == 0)
> + return n;
> + xstat++;
> +
> + xstat->id = indx++;
> + xstat->value = txq->errors;
> + if (--requested == 0)
> + return n;
> + xstat++;
> + }
This approach is error prone and closed to extension;
many implementations prefer to have an internal struct to link
names and values, something like:
struct name_offset {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint32_t offset;
};
'name' holds the xstat name; for this patch it will only be the repeating
part of the name like 'packets', 'bytes', etc., and you need to construct
the full name on the fly. That is why you may prefer to add a type field to
the above struct to differentiate Rx and Tx and use it for name creation, up
to you.
'offset' holds the offset of the corresponding value in a struct; for you it
is "struct gve_rx_queue" & "struct gve_tx_queue". Since there are two
different structs, it helps to create a helper macro that gets the offset
from the correct struct.
struct name_offset rx_name_offset[] = {
{ "packets", offsetof(struct gve_rx_queue, packets) },
....
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
	struct gve_rx_queue *rxq = dev->data->rx_queues[i];
	for (j = 0; j < RTE_DIM(rx_name_offset); j++) {
		addr = (char *)rxq + rx_name_offset[j].offset;
		xstats[index++].value = *(uint64_t *)addr;
	}
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
	struct gve_tx_queue *txq = dev->data->tx_queues[i];
	for (j = 0; j < RTE_DIM(tx_name_offset); j++) {
		addr = (char *)txq + tx_name_offset[j].offset;
		xstats[index++].value = *(uint64_t *)addr;
	}
}
There are many sample of this in other drivers.
And since for this case, instead of a fixed number of names, the queue names
change dynamically with the number of queues.
> + }
> +
> + return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
When the above suggested 'name_offset' struct is used, you can use its size
instead of the hardcoded 4 & 3 values.
With above sample, it becomes:
return (dev->data->nb_rx_queues * RTE_DIM(rx_name_offset)) +
(dev->data->nb_tx_queues * RTE_DIM(tx_name_offset));
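(Added sketch, not part of the original mail: one way the per-struct helper
macros and tables mentioned above could look, assuming the gve queue structs
expose the uint64_t counters used in this patch — packets, bytes, errors and
no_mbufs.)

``
#define RX_QUEUE_STAT_OFFSET(x) offsetof(struct gve_rx_queue, x)
#define TX_QUEUE_STAT_OFFSET(x) offsetof(struct gve_tx_queue, x)

static const struct name_offset rx_name_offset[] = {
	{ "packets",  RX_QUEUE_STAT_OFFSET(packets) },
	{ "bytes",    RX_QUEUE_STAT_OFFSET(bytes) },
	{ "errors",   RX_QUEUE_STAT_OFFSET(errors) },
	{ "no_mbufs", RX_QUEUE_STAT_OFFSET(no_mbufs) },
};

static const struct name_offset tx_name_offset[] = {
	{ "packets", TX_QUEUE_STAT_OFFSET(packets) },
	{ "bytes",   TX_QUEUE_STAT_OFFSET(bytes) },
	{ "errors",  TX_QUEUE_STAT_OFFSET(errors) },
};

/* reading one counter given a queue pointer and a table entry */
uint64_t value = *(const uint64_t *)((const char *)rxq +
				     rx_name_offset[j].offset);
``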
> +}
> +
> +static int
> +gve_xstats_reset(struct rte_eth_dev *dev)
> +{
> + return gve_stats_reset(dev);
> +}
> +
> +static int
> +gve_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names,
> + unsigned int n)
> +{
> + if (xstats_names) {
> + uint requested = n;
> + struct rte_eth_xstat_name *xstats_name = xstats_names;
> + uint16_t i;
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + snprintf(xstats_name->name, sizeof(xstats_name->name),
> + "rx_q%d_packets", i);
> + if (--requested == 0)
> + return n;
> + xstats_name++;
> + snprintf(xstats_name->name, sizeof(xstats_name->name),
> + "rx_q%d_bytes", i);
> + if (--requested == 0)
> + return n;
> + xstats_name++;
> + snprintf(xstats_name->name, sizeof(xstats_name->name),
> + "rx_q%d_errors", i);
> + if (--requested == 0)
> + return n;
> + xstats_name++;
> + snprintf(xstats_name->name, sizeof(xstats_name->name),
> + "rx_q%d_no_mbufs", i);
> + if (--requested == 0)
> + return n;
> + xstats_name++;
> + }
> +
And again, following the above samples this becomes:
for (i = 0; i < dev->data->nb_rx_queues; i++) {
	for (j = 0; j < RTE_DIM(rx_name_offset); j++) {
		snprintf(xstats_names[index].name,
			 sizeof(xstats_names[index].name),
			 "rx_q%d_%s", i, rx_name_offset[j].name);
		index++;
	}
}
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + snprintf(xstats_name->name, sizeof(xstats_name->name),
> + "tx_q%d_packets", i);
> + if (--requested == 0)
> + return n;
> + xstats_name++;
> + snprintf(xstats_name->name, sizeof(xstats_name->name),
> + "tx_q%d_bytes", i);
> + if (--requested == 0)
> + return n;
> + xstats_name++;
> + snprintf(xstats_name->name, sizeof(xstats_name->name),
> + "tx_q%d_errors", i);
> + if (--requested == 0)
> + return n;
> + xstats_name++;
> + }
> + }
> +
> + return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
> +}
> +
> static const struct eth_dev_ops gve_eth_dev_ops = {
> .dev_configure = gve_dev_configure,
> .dev_start = gve_dev_start,
> .dev_stop = gve_dev_stop,
> .dev_close = gve_dev_close,
> - .dev_infos_get = gve_dev_info_get,
> + .dev_infos_get = gve_dev_infos_get,
> .rx_queue_setup = gve_rx_queue_setup,
> .tx_queue_setup = gve_tx_queue_setup,
> .rx_queue_release = gve_rx_queue_release,
> .tx_queue_release = gve_tx_queue_release,
> .link_update = gve_link_update,
> - .stats_get = gve_dev_stats_get,
> - .stats_reset = gve_dev_stats_reset,
> - .mtu_set = gve_dev_mtu_set,
> + .stats_get = gve_stats_get,
> + .stats_reset = gve_stats_reset,
> + .mtu_set = gve_mtu_set,
> + .xstats_get = gve_xstats_get,
> + .xstats_reset = gve_xstats_reset,
> + .xstats_get_names = gve_xstats_get_names,
> };
>
> static void
> diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
> index 66fbcf3930..7687977003 100644
> --- a/drivers/net/gve/gve_rx.c
> +++ b/drivers/net/gve/gve_rx.c
> @@ -22,8 +22,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
> if (diag < 0) {
> for (i = 0; i < nb_alloc; i++) {
> nmb = rte_pktmbuf_alloc(rxq->mpool);
> - if (!nmb)
> + if (!nmb) {
> + rxq->no_mbufs++;
Why is this needed? The original code is as follows:
``
for (i = 0; i < nb_alloc; i++) {
nmb = rte_pktmbuf_alloc(rxq->mpool);
if (!nmb)
break;
rxq->sw_ring[idx + i] = nmb;
}
if (i != nb_alloc) {
rxq->no_mbufs += nb_alloc - i;
nb_alloc = i;
}
``
"if (i != nb_alloc)" block already takes care of 'rxq->no_mbufs' value,
is an additional increment required in the for loop?
And change is unrelated with the patch anyway.
> break;
> + }
> rxq->sw_ring[idx + i] = nmb;
> }
> if (i != nb_alloc) {
> @@ -57,8 +59,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
> if (diag < 0) {
> for (i = 0; i < nb_alloc; i++) {
> nmb = rte_pktmbuf_alloc(rxq->mpool);
> - if (!nmb)
> + if (!nmb) {
> + rxq->no_mbufs++;
> break;
> + }
> rxq->sw_ring[idx + i] = nmb;
> }
> nb_alloc = i;
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/2] net/gve: add extended statistics
2023-02-17 12:34 ` Ferruh Yigit
@ 2023-02-19 0:26 ` Levend Sayar
2023-02-19 20:09 ` Ferruh Yigit
0 siblings, 1 reply; 11+ messages in thread
From: Levend Sayar @ 2023-02-19 0:26 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: Guo, Junfeng, dev
[-- Attachment #1: Type: text/plain, Size: 12337 bytes --]
Ferruh,
Thanks for this detailed review.
I am marking this patch as superseded and will create a new one.
You’re right on all points.
For the rx no_mbufs counting, I probably overlooked it while rebasing my patch and it got mixed in with a newly arrived patch.
When I checked the ethdev layer again, I noticed that when dev_flags has RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS,
rx/tx queue stats are already added. I am pushing a fresh patch for adding rx/tx queue stats.
I also noticed a missing part in the rx no_mbufs counting.
Best,
Levend
> On 17 Feb 2023, at 15:34, Ferruh Yigit <ferruh.yigit@amd.com> wrote:
>
> On 2/16/2023 6:58 PM, Levend Sayar wrote:
>> Google Virtual NIC PMD is enriched with extended statistics info.
>
> Only queue stats added to xstats, can you please highlight this in the
> commit log?
>
>> eth_dev_ops callback names are also synched with eth_dev_ops field names
>>
>
> Renaming eth_dev_ops is not related to xstats, and I am not sure if the
> change is necessary, can you please drop it from this patch?
>
>> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
>> ---
>> drivers/net/gve/gve_ethdev.c | 152 ++++++++++++++++++++++++++++++-----
>> drivers/net/gve/gve_rx.c | 8 +-
>> 2 files changed, 138 insertions(+), 22 deletions(-)
>>
>> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
>> index fef2458a16..e31fdce960 100644
>> --- a/drivers/net/gve/gve_ethdev.c
>> +++ b/drivers/net/gve/gve_ethdev.c
>> @@ -266,7 +266,7 @@ gve_dev_close(struct rte_eth_dev *dev)
>> }
>>
>> static int
>> -gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>> +gve_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>> {
>> struct gve_priv *priv = dev->data->dev_private;
>>
>> @@ -319,15 +319,12 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>> }
>>
>> static int
>> -gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>> +gve_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>> {
>> uint16_t i;
>>
>> for (i = 0; i < dev->data->nb_tx_queues; i++) {
>> struct gve_tx_queue *txq = dev->data->tx_queues[i];
>> - if (txq == NULL)
>> - continue;
>> -
>
> I assume check is removed because it is unnecessary, but again not
> directly related with the patch, can you also drop these from the patch
> to reduce the noise.
>
>> stats->opackets += txq->packets;
>> stats->obytes += txq->bytes;
>> stats->oerrors += txq->errors;
>> @@ -335,9 +332,6 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>>
>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>> - if (rxq == NULL)
>> - continue;
>> -
>> stats->ipackets += rxq->packets;
>> stats->ibytes += rxq->bytes;
>> stats->ierrors += rxq->errors;
>> @@ -348,15 +342,12 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>> }
>>
>> static int
>> -gve_dev_stats_reset(struct rte_eth_dev *dev)
>> +gve_stats_reset(struct rte_eth_dev *dev)
>> {
>> uint16_t i;
>>
>> for (i = 0; i < dev->data->nb_tx_queues; i++) {
>> struct gve_tx_queue *txq = dev->data->tx_queues[i];
>> - if (txq == NULL)
>> - continue;
>> -
>> txq->packets = 0;
>> txq->bytes = 0;
>> txq->errors = 0;
>> @@ -364,9 +355,6 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
>>
>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>> - if (rxq == NULL)
>> - continue;
>> -
>> rxq->packets = 0;
>> rxq->bytes = 0;
>> rxq->errors = 0;
>> @@ -377,7 +365,7 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
>> }
>>
>> static int
>> -gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>> +gve_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>> {
>> struct gve_priv *priv = dev->data->dev_private;
>> int err;
>> @@ -403,20 +391,144 @@ gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>> return 0;
>> }
>>
>> +static int
>> +gve_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n)
>> +{
>> + if (xstats) {
>
> To reduce indentation (and increase readability) you can prefer:
> ``
> if !xstats
> return count;
>
> <put rest of logic here>
> ``
>
>> + uint requested = n;
>
> uint? C#? Please prefer standard "unsigned int" type.
>
>> + uint64_t indx = 0;
>> + struct rte_eth_xstat *xstat = xstats;
>> + uint16_t i;
>> +
>> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
>> + struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>> + xstat->id = indx++;
>> + xstat->value = rxq->packets;
>> + if (--requested == 0)
>> + return n;
>
> And in this case, ethdev layer does the checks and return accordingly,
> so need to try to fill the stats as much as possible, can you please
> double check the ethdev API?
>
>> + xstat++;
>> +
>> + xstat->id = indx++;
>> + xstat->value = rxq->bytes;
>> + if (--requested == 0)
>> + return n;
>> + xstat++;
>> +
>> + xstat->id = indx++;
>> + xstat->value = rxq->errors;
>> + if (--requested == 0)
>> + return n;
>> + xstat++;
>> +
>> + xstat->id = indx++;
>> + xstat->value = rxq->no_mbufs;
>> + if (--requested == 0)
>> + return n;
>> + xstat++;
>> + }
>> +
>> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
>> + struct gve_tx_queue *txq = dev->data->tx_queues[i];
>> + xstat->id = indx++;
>> + xstat->value = txq->packets;
>> + if (--requested == 0)
>> + return n;
>> + xstat++;
>> +
>> + xstat->id = indx++;
>> + xstat->value = txq->bytes;
>> + if (--requested == 0)
>> + return n;
>> + xstat++;
>> +
>> + xstat->id = indx++;
>> + xstat->value = txq->errors;
>> + if (--requested == 0)
>> + return n;
>> + xstat++;
>> + }
>
>
> This approach is error to prone and close to extension,
> many inplementations prefer to have an itnernal struct to link between
> names and values, something like:
> struct name_offset {
> char name[RTE_ETH_XSTATS_NAME_SIZE];
> uint32_t offset;
> };
>
> 'name' holds the xstat name, for this patch it will be only repeating
> part of name like 'packets', 'bytes', etc.. and you need to construct
> the full name on the fly, that is why it you may prefer to add type to
> above struct to diffrenciate Rx and Tx and use it for name creation, up
> to you.
>
>
> 'offset' holds the offset of corresponding value in a struct, for you it
> is "struct gve_rx_queue" & "struct gve_tx_queue", since there are two
> different structs it helps to create helper macro that gets offset from
> correct struct.
>
> struct name_offset rx_name_offset[] = {
> { "packets", offsetof(struct gve_rx_queue, packets) },
> ....
> }
>
>
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
> addr = (char *)rxq + rx_name_offset[i].offset;
> xstats[index++].value = *addr;
> }
> for (i = 0; i < dev->data->nb_tx_queues; i++) {
> struct gve_tx_queue *txq = dev->data->tx_queues[i];
> addr = (char *)txq + tx_name_offset[i].offset;
> xstats[index++].value = *addr;
> }
>
> There are many sample of this in other drivers.
>
> And since for this case instead of having fixed numbe of names, there
> are dynamiccaly changing queue names,
>
>
>> + }
>> +
>> + return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
>
> When above suggested 'name_offset' struct used, you can use size of it
> instead of hardcoded 4 & 3 values.
>
> With above sample, it becomes:
>
> return (dev->data->nb_rx_queues * RTE_DIM(rx_name_offset)) +
> (dev->data->nb_tx_queues * RTE_DIM(tx_name_offset));
>
>
>> +}
>> +
>> +static int
>> +gve_xstats_reset(struct rte_eth_dev *dev)
>> +{
>> + return gve_stats_reset(dev);
>> +}
>> +
>> +static int
>> +gve_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names,
>> + unsigned int n)
>> +{
>> + if (xstats_names) {
>> + uint requested = n;
>> + struct rte_eth_xstat_name *xstats_name = xstats_names;
>> + uint16_t i;
>> +
>> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
>> + snprintf(xstats_name->name, sizeof(xstats_name->name),
>> + "rx_q%d_packets", i);
>> + if (--requested == 0)
>> + return n;
>> + xstats_name++;
>> + snprintf(xstats_name->name, sizeof(xstats_name->name),
>> + "rx_q%d_bytes", i);
>> + if (--requested == 0)
>> + return n;
>> + xstats_name++;
>> + snprintf(xstats_name->name, sizeof(xstats_name->name),
>> + "rx_q%d_errors", i);
>> + if (--requested == 0)
>> + return n;
>> + xstats_name++;
>> + snprintf(xstats_name->name, sizeof(xstats_name->name),
>> + "rx_q%d_no_mbufs", i);
>> + if (--requested == 0)
>> + return n;
>> + xstats_name++;
>> + }
>> +
>
> And again according above samples this becomes:
>
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> for (j = 0; j < RTE_DIM(rx_name_offset); j++) {
> snprintf(xstats_names[index++].name, sizeof(), "rx_q%d_%s",
> i, rx_name_offset[j].name);
> }
>
>
>> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
>> + snprintf(xstats_name->name, sizeof(xstats_name->name),
>> + "tx_q%d_packets", i);
>> + if (--requested == 0)
>> + return n;
>> + xstats_name++;
>> + snprintf(xstats_name->name, sizeof(xstats_name->name),
>> + "tx_q%d_bytes", i);
>> + if (--requested == 0)
>> + return n;
>> + xstats_name++;
>> + snprintf(xstats_name->name, sizeof(xstats_name->name),
>> + "tx_q%d_errors", i);
>> + if (--requested == 0)
>> + return n;
>> + xstats_name++;
>> + }
>> + }
>> +
>> + return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
>> +}
>> +
>> static const struct eth_dev_ops gve_eth_dev_ops = {
>> .dev_configure = gve_dev_configure,
>> .dev_start = gve_dev_start,
>> .dev_stop = gve_dev_stop,
>> .dev_close = gve_dev_close,
>> - .dev_infos_get = gve_dev_info_get,
>> + .dev_infos_get = gve_dev_infos_get,
>> .rx_queue_setup = gve_rx_queue_setup,
>> .tx_queue_setup = gve_tx_queue_setup,
>> .rx_queue_release = gve_rx_queue_release,
>> .tx_queue_release = gve_tx_queue_release,
>> .link_update = gve_link_update,
>> - .stats_get = gve_dev_stats_get,
>> - .stats_reset = gve_dev_stats_reset,
>> - .mtu_set = gve_dev_mtu_set,
>> + .stats_get = gve_stats_get,
>> + .stats_reset = gve_stats_reset,
>> + .mtu_set = gve_mtu_set,
>> + .xstats_get = gve_xstats_get,
>> + .xstats_reset = gve_xstats_reset,
>> + .xstats_get_names = gve_xstats_get_names,
>> };
>>
>> static void
>> diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
>> index 66fbcf3930..7687977003 100644
>> --- a/drivers/net/gve/gve_rx.c
>> +++ b/drivers/net/gve/gve_rx.c
>> @@ -22,8 +22,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
>> if (diag < 0) {
>> for (i = 0; i < nb_alloc; i++) {
>> nmb = rte_pktmbuf_alloc(rxq->mpool);
>> - if (!nmb)
>> + if (!nmb) {
>> + rxq->no_mbufs++;
>
> Why this is needed, original code is as following:
>
> ``
> for (i = 0; i < nb_alloc; i++) {
> nmb = rte_pktmbuf_alloc(rxq->mpool);
> if (!nmb)
> break;
> rxq->sw_ring[idx + i] = nmb;
> }
> if (i != nb_alloc) {
> rxq->no_mbufs += nb_alloc - i;
> nb_alloc = i;
> }
> ``
>
> "if (i != nb_alloc)" block already takes care of 'rxq->no_mbufs' value,
> is an additional increment required in the for loop?
>
>
> And change is unrelated with the patch anyway.
>
>> break;
>> + }
>> rxq->sw_ring[idx + i] = nmb;
>> }
>> if (i != nb_alloc) {
>> @@ -57,8 +59,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
>> if (diag < 0) {
>> for (i = 0; i < nb_alloc; i++) {
>> nmb = rte_pktmbuf_alloc(rxq->mpool);
>> - if (!nmb)
>> + if (!nmb) {
>> + rxq->no_mbufs++;
>> break;
>> + }
>> rxq->sw_ring[idx + i] = nmb;
>> }
>> nb_alloc = i;
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/2] net/gve: add extended statistics
2023-02-19 0:26 ` Levend Sayar
@ 2023-02-19 20:09 ` Ferruh Yigit
2023-02-19 20:37 ` Levend Sayar
0 siblings, 1 reply; 11+ messages in thread
From: Ferruh Yigit @ 2023-02-19 20:09 UTC (permalink / raw)
To: Levend Sayar; +Cc: Guo, Junfeng, dev
On 2/19/2023 12:26 AM, Levend Sayar wrote:
> Ferruh,
>
> Thanks for this detailed review.
> I am setting superseded this patch and create a new one.
> You’re right at all points.
> For rx.no_mbufs counting, I probably overlooked while rebasing my patch
> and it is mixed with newly came patch.
>
> When I check ethdev layer again, I noticed that when dev_flags
> has RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS
> Rx/tx queue stats are already added. I am pushing a fresh patch for
> adding rx/tx queue stats.
>
Hi Levend,
You are right, BUT 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' is a temporary
solution and the plan is to remove it [1].
The background is that the queue stats in "struct rte_eth_stats" have a fixed
size, and as the number of queues supported by devices increases these fields
keep getting bigger; the solution we came to was to completely remove these
fields from the stats struct and get queue statistics via xstats.
During the transition, 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' was introduced
until all drivers implement queue stats in xstats. We are not pushing
hard for existing drivers to update, but we do at least require new drivers
to implement the xstats method.
That is why updating queue stats in 'gve_dev_stats_get()' was rejected before
for net/gve, and an xstats implementation was requested.
[1]
https://elixir.bootlin.com/dpdk/v22.11.1/source/doc/guides/rel_notes/deprecation.rst#L88
> And also noticed a missing part at rx no_mbufs counting.
>
> Best,
> Levend
>
>
>> On 17 Feb 2023, at 15:34, Ferruh Yigit <ferruh.yigit@amd.com> wrote:
>>
>> On 2/16/2023 6:58 PM, Levend Sayar wrote:
>>> Google Virtual NIC PMD is enriched with extended statistics info.
>>
>> Only queue stats added to xstats, can you please highlight this in the
>> commit log?
>>
>>> eth_dev_ops callback names are also synched with eth_dev_ops field names
>>>
>>
>> Renaming eth_dev_ops is not related to xstats, and I am not sure if the
>> change is necessary, can you please drop it from this patch?
>>
>>> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
>>> ---
>>> drivers/net/gve/gve_ethdev.c | 152 ++++++++++++++++++++++++++++++-----
>>> drivers/net/gve/gve_rx.c | 8 +-
>>> 2 files changed, 138 insertions(+), 22 deletions(-)
>>>
>>> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
>>> index fef2458a16..e31fdce960 100644
>>> --- a/drivers/net/gve/gve_ethdev.c
>>> +++ b/drivers/net/gve/gve_ethdev.c
>>> @@ -266,7 +266,7 @@ gve_dev_close(struct rte_eth_dev *dev)
>>> }
>>>
>>> static int
>>> -gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
>>> *dev_info)
>>> +gve_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
>>> *dev_info)
>>> {
>>> struct gve_priv *priv = dev->data->dev_private;
>>>
>>> @@ -319,15 +319,12 @@ gve_dev_info_get(struct rte_eth_dev *dev,
>>> struct rte_eth_dev_info *dev_info)
>>> }
>>>
>>> static int
>>> -gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>>> +gve_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>>> {
>>> uint16_t i;
>>>
>>> for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>> struct gve_tx_queue *txq = dev->data->tx_queues[i];
>>> -if (txq == NULL)
>>> -continue;
>>> -
>>
>> I assume check is removed because it is unnecessary, but again not
>> directly related with the patch, can you also drop these from the patch
>> to reduce the noise.
>>
>>> stats->opackets += txq->packets;
>>> stats->obytes += txq->bytes;
>>> stats->oerrors += txq->errors;
>>> @@ -335,9 +332,6 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct
>>> rte_eth_stats *stats)
>>>
>>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>>> -if (rxq == NULL)
>>> -continue;
>>> -
>>> stats->ipackets += rxq->packets;
>>> stats->ibytes += rxq->bytes;
>>> stats->ierrors += rxq->errors;
>>> @@ -348,15 +342,12 @@ gve_dev_stats_get(struct rte_eth_dev *dev,
>>> struct rte_eth_stats *stats)
>>> }
>>>
>>> static int
>>> -gve_dev_stats_reset(struct rte_eth_dev *dev)
>>> +gve_stats_reset(struct rte_eth_dev *dev)
>>> {
>>> uint16_t i;
>>>
>>> for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>> struct gve_tx_queue *txq = dev->data->tx_queues[i];
>>> -if (txq == NULL)
>>> -continue;
>>> -
>>> txq->packets = 0;
>>> txq->bytes = 0;
>>> txq->errors = 0;
>>> @@ -364,9 +355,6 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
>>>
>>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>>> -if (rxq == NULL)
>>> -continue;
>>> -
>>> rxq->packets = 0;
>>> rxq->bytes = 0;
>>> rxq->errors = 0;
>>> @@ -377,7 +365,7 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
>>> }
>>>
>>> static int
>>> -gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>>> +gve_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>>> {
>>> struct gve_priv *priv = dev->data->dev_private;
>>> int err;
>>> @@ -403,20 +391,144 @@ gve_dev_mtu_set(struct rte_eth_dev *dev,
>>> uint16_t mtu)
>>> return 0;
>>> }
>>>
>>> +static int
>>> +gve_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat
>>> *xstats, unsigned int n)
>>> +{
>>> +if (xstats) {
>>
>> To reduce indentation (and increase readability) you can prefer:
>> ``
>> if !xstats
>> return count;
>>
>> <put rest of logic here>
>> ``
>>
>>> +uint requested = n;
>>
>> uint? C#? Please prefer standard "unsigned int" type.
>>
>>> +uint64_t indx = 0;
>>> +struct rte_eth_xstat *xstat = xstats;
>>> +uint16_t i;
>>> +
>>> +for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>> +struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>>> +xstat->id = indx++;
>>> +xstat->value = rxq->packets;
>>> +if (--requested == 0)
>>> +return n;
>>
>> And in this case, ethdev layer does the checks and return accordingly,
>> so need to try to fill the stats as much as possible, can you please
>> double check the ethdev API?
>>
>>> +xstat++;
>>> +
>>> +xstat->id = indx++;
>>> +xstat->value = rxq->bytes;
>>> +if (--requested == 0)
>>> +return n;
>>> +xstat++;
>>> +
>>> +xstat->id = indx++;
>>> +xstat->value = rxq->errors;
>>> +if (--requested == 0)
>>> +return n;
>>> +xstat++;
>>> +
>>> +xstat->id = indx++;
>>> +xstat->value = rxq->no_mbufs;
>>> +if (--requested == 0)
>>> +return n;
>>> +xstat++;
>>> +}
>>> +
>>> +for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>> +struct gve_tx_queue *txq = dev->data->tx_queues[i];
>>> +xstat->id = indx++;
>>> +xstat->value = txq->packets;
>>> +if (--requested == 0)
>>> +return n;
>>> +xstat++;
>>> +
>>> +xstat->id = indx++;
>>> +xstat->value = txq->bytes;
>>> +if (--requested == 0)
>>> +return n;
>>> +xstat++;
>>> +
>>> +xstat->id = indx++;
>>> +xstat->value = txq->errors;
>>> +if (--requested == 0)
>>> +return n;
>>> +xstat++;
>>> +}
>>
>>
>> This approach is error to prone and close to extension,
>> many inplementations prefer to have an itnernal struct to link between
>> names and values, something like:
>> struct name_offset {
>> char name[RTE_ETH_XSTATS_NAME_SIZE];
>> uint32_t offset;
>> };
>>
>> 'name' holds the xstat name, for this patch it will be only repeating
>> part of name like 'packets', 'bytes', etc.. and you need to construct
>> the full name on the fly, that is why it you may prefer to add type to
>> above struct to diffrenciate Rx and Tx and use it for name creation, up
>> to you.
>>
>>
>> 'offset' holds the offset of corresponding value in a struct, for you it
>> is "struct gve_rx_queue" & "struct gve_tx_queue", since there are two
>> different structs it helps to create helper macro that gets offset from
>> correct struct.
>>
>> struct name_offset rx_name_offset[] = {
>> { "packets", offsetof(struct gve_rx_queue, packets) },
>> ....
>> }
>>
>>
>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>> addr = (char *)rxq + rx_name_offset[i].offset;
>> xstats[index++].value = *addr;
>> }
>> for (i = 0; i < dev->data->nb_tx_queues; i++) {
>> struct gve_tx_queue *txq = dev->data->tx_queues[i];
>> addr = (char *)txq + tx_name_offset[i].offset;
>> xstats[index++].value = *addr;
>> }
>>
>> There are many sample of this in other drivers.
>>
>> And since for this case instead of having fixed numbe of names, there
>> are dynamiccaly changing queue names,
>>
>>
>>> +}
>>> +
>>> +return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
>>
>> When above suggested 'name_offset' struct used, you can use size of it
>> instead of hardcoded 4 & 3 values.
>>
>> With above sample, it becomes:
>>
>> return (dev->data->nb_rx_queues * RTE_DIM(rx_name_offset)) +
>> (dev->data->nb_tx_queues * RTE_DIM(tx_name_offset));
>>
>>
>>> +}
>>> +
>>> +static int
>>> +gve_xstats_reset(struct rte_eth_dev *dev)
>>> +{
>>> +return gve_stats_reset(dev);
>>> +}
>>> +
>>> +static int
>>> +gve_xstats_get_names(struct rte_eth_dev *dev, struct
>>> rte_eth_xstat_name *xstats_names,
>>> +unsigned int n)
>>> +{
>>> +if (xstats_names) {
>>> +uint requested = n;
>>> +struct rte_eth_xstat_name *xstats_name = xstats_names;
>>> +uint16_t i;
>>> +
>>> +for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>> +"rx_q%d_packets", i);
>>> +if (--requested == 0)
>>> +return n;
>>> +xstats_name++;
>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>> +"rx_q%d_bytes", i);
>>> +if (--requested == 0)
>>> +return n;
>>> +xstats_name++;
>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>> +"rx_q%d_errors", i);
>>> +if (--requested == 0)
>>> +return n;
>>> +xstats_name++;
>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>> +"rx_q%d_no_mbufs", i);
>>> +if (--requested == 0)
>>> +return n;
>>> +xstats_name++;
>>> +}
>>> +
>>
>> And again according above samples this becomes:
>>
>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>> for (j = 0; j < RTE_DIM(rx_name_offset); j++) {
>> snprintf(xstats_names[index++].name, sizeof(), "rx_q%d_%s",
>> i, rx_name_offset[j].name);
>> }
>>
>>
>>> +for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>> +"tx_q%d_packets", i);
>>> +if (--requested == 0)
>>> +return n;
>>> +xstats_name++;
>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>> +"tx_q%d_bytes", i);
>>> +if (--requested == 0)
>>> +return n;
>>> +xstats_name++;
>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>> +"tx_q%d_errors", i);
>>> +if (--requested == 0)
>>> +return n;
>>> +xstats_name++;
>>> +}
>>> +}
>>> +
>>> +return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
>>> +}
>>> +
>>> static const struct eth_dev_ops gve_eth_dev_ops = {
>>> .dev_configure = gve_dev_configure,
>>> .dev_start = gve_dev_start,
>>> .dev_stop = gve_dev_stop,
>>> .dev_close = gve_dev_close,
>>> -.dev_infos_get = gve_dev_info_get,
>>> +.dev_infos_get = gve_dev_infos_get,
>>> .rx_queue_setup = gve_rx_queue_setup,
>>> .tx_queue_setup = gve_tx_queue_setup,
>>> .rx_queue_release = gve_rx_queue_release,
>>> .tx_queue_release = gve_tx_queue_release,
>>> .link_update = gve_link_update,
>>> -.stats_get = gve_dev_stats_get,
>>> -.stats_reset = gve_dev_stats_reset,
>>> -.mtu_set = gve_dev_mtu_set,
>>> +.stats_get = gve_stats_get,
>>> +.stats_reset = gve_stats_reset,
>>> +.mtu_set = gve_mtu_set,
>>> +.xstats_get = gve_xstats_get,
>>> +.xstats_reset = gve_xstats_reset,
>>> +.xstats_get_names = gve_xstats_get_names,
>>> };
>>>
>>> static void
>>> diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
>>> index 66fbcf3930..7687977003 100644
>>> --- a/drivers/net/gve/gve_rx.c
>>> +++ b/drivers/net/gve/gve_rx.c
>>> @@ -22,8 +22,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
>>> if (diag < 0) {
>>> for (i = 0; i < nb_alloc; i++) {
>>> nmb = rte_pktmbuf_alloc(rxq->mpool);
>>> -if (!nmb)
>>> +if (!nmb) {
>>> +rxq->no_mbufs++;
>>
>> Why this is needed, original code is as following:
>>
>> ``
>> for (i = 0; i < nb_alloc; i++) {
>> nmb = rte_pktmbuf_alloc(rxq->mpool);
>> if (!nmb)
>> break;
>> rxq->sw_ring[idx + i] = nmb;
>> }
>> if (i != nb_alloc) {
>> rxq->no_mbufs += nb_alloc - i;
>> nb_alloc = i;
>> }
>> ``
>>
>> "if (i != nb_alloc)" block already takes care of 'rxq->no_mbufs' value,
>> is an additional increment required in the for loop?
>>
>>
>> And change is unrelated with the patch anyway.
>>
>>> break;
>>> +}
>>> rxq->sw_ring[idx + i] = nmb;
>>> }
>>> if (i != nb_alloc) {
>>> @@ -57,8 +59,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
>>> if (diag < 0) {
>>> for (i = 0; i < nb_alloc; i++) {
>>> nmb = rte_pktmbuf_alloc(rxq->mpool);
>>> -if (!nmb)
>>> +if (!nmb) {
>>> +rxq->no_mbufs++;
>>> break;
>>> +}
>>> rxq->sw_ring[idx + i] = nmb;
>>> }
>>> nb_alloc = i;
>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 2/2] net/gve: add extended statistics
2023-02-19 20:09 ` Ferruh Yigit
@ 2023-02-19 20:37 ` Levend Sayar
0 siblings, 0 replies; 11+ messages in thread
From: Levend Sayar @ 2023-02-19 20:37 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: Guo, Junfeng, dev
[-- Attachment #1: Type: text/plain, Size: 13961 bytes --]
Hi Ferruh,
Oops, I was not aware of that rejection.
Thanks for notification.
Let me supersede this patch.
And create a new one.
Best,
Levend
> On 19 Feb 2023, at 23:09, Ferruh Yigit <ferruh.yigit@amd.com> wrote:
>
> On 2/19/2023 12:26 AM, Levend Sayar wrote:
>> Ferruh,
>>
>> Thanks for this detailed review.
>> I am setting superseded this patch and create a new one.
>> You’re right at all points.
>> For rx.no_mbufs counting, I probably overlooked while rebasing my patch
>> and it is mixed with newly came patch.
>>
>> When I check ethdev layer again, I noticed that when dev_flags
>> has RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS
>> Rx/tx queue stats are already added. I am pushing a fresh patch for
>> adding rx/tx queue stats.
>>
>
> Hi Levend,
>
> You are right BUT, 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' is a temporary
> solution and plan is to remove it [1].
>
> Background is, queue stats in "struct rte_eth_stats" has fixed size and
> as number of queues supported by devices increase these fields getting
> bigger and bigger, the solution we came was to completely remove these
> fields from stats struct and get queue statistics via xstats.
>
> During transition 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' is introduced
> until all drivers implement queue stats in xstats. We are not pushing
> hard for existing drivers to update but at least requiring new drivers
> to implement xstats method.
>
> That is why for net/gve updating queue stats in 'gve_dev_stats_get()'
> rejected before, and xstats implementation is requested.
>
>
> [1]
> https://elixir.bootlin.com/dpdk/v22.11.1/source/doc/guides/rel_notes/deprecation.rst#L88
>
>
>> And also noticed a missing part at rx no_mbufs counting.
>>
>> Best,
>> Levend
>>
>>
>>> On 17 Feb 2023, at 15:34, Ferruh Yigit <ferruh.yigit@amd.com> wrote:
>>>
>>> On 2/16/2023 6:58 PM, Levend Sayar wrote:
>>>> Google Virtual NIC PMD is enriched with extended statistics info.
>>>
>>> Only queue stats added to xstats, can you please highlight this in the
>>> commit log?
>>>
>>>> eth_dev_ops callback names are also synched with eth_dev_ops field names
>>>>
>>>
>>> Renaming eth_dev_ops is not related to xstats, and I am not sure if the
>>> change is necessary, can you please drop it from this patch?
>>>
>>>> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
>>>> ---
>>>> drivers/net/gve/gve_ethdev.c | 152 ++++++++++++++++++++++++++++++-----
>>>> drivers/net/gve/gve_rx.c | 8 +-
>>>> 2 files changed, 138 insertions(+), 22 deletions(-)
>>>>
>>>> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
>>>> index fef2458a16..e31fdce960 100644
>>>> --- a/drivers/net/gve/gve_ethdev.c
>>>> +++ b/drivers/net/gve/gve_ethdev.c
>>>> @@ -266,7 +266,7 @@ gve_dev_close(struct rte_eth_dev *dev)
>>>> }
>>>>
>>>> static int
>>>> -gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
>>>> *dev_info)
>>>> +gve_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
>>>> *dev_info)
>>>> {
>>>> struct gve_priv *priv = dev->data->dev_private;
>>>>
>>>> @@ -319,15 +319,12 @@ gve_dev_info_get(struct rte_eth_dev *dev,
>>>> struct rte_eth_dev_info *dev_info)
>>>> }
>>>>
>>>> static int
>>>> -gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>>>> +gve_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
>>>> {
>>>> uint16_t i;
>>>>
>>>> for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>>> struct gve_tx_queue *txq = dev->data->tx_queues[i];
>>>> -if (txq == NULL)
>>>> -continue;
>>>> -
>>>
>>> I assume check is removed because it is unnecessary, but again not
>>> directly related with the patch, can you also drop these from the patch
>>> to reduce the noise.
>>>
>>>> stats->opackets += txq->packets;
>>>> stats->obytes += txq->bytes;
>>>> stats->oerrors += txq->errors;
>>>> @@ -335,9 +332,6 @@ gve_dev_stats_get(struct rte_eth_dev *dev, struct
>>>> rte_eth_stats *stats)
>>>>
>>>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>>> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>>>> -if (rxq == NULL)
>>>> -continue;
>>>> -
>>>> stats->ipackets += rxq->packets;
>>>> stats->ibytes += rxq->bytes;
>>>> stats->ierrors += rxq->errors;
>>>> @@ -348,15 +342,12 @@ gve_dev_stats_get(struct rte_eth_dev *dev,
>>>> struct rte_eth_stats *stats)
>>>> }
>>>>
>>>> static int
>>>> -gve_dev_stats_reset(struct rte_eth_dev *dev)
>>>> +gve_stats_reset(struct rte_eth_dev *dev)
>>>> {
>>>> uint16_t i;
>>>>
>>>> for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>>> struct gve_tx_queue *txq = dev->data->tx_queues[i];
>>>> -if (txq == NULL)
>>>> -continue;
>>>> -
>>>> txq->packets = 0;
>>>> txq->bytes = 0;
>>>> txq->errors = 0;
>>>> @@ -364,9 +355,6 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
>>>>
>>>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>>> struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>>>> -if (rxq == NULL)
>>>> -continue;
>>>> -
>>>> rxq->packets = 0;
>>>> rxq->bytes = 0;
>>>> rxq->errors = 0;
>>>> @@ -377,7 +365,7 @@ gve_dev_stats_reset(struct rte_eth_dev *dev)
>>>> }
>>>>
>>>> static int
>>>> -gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>>>> +gve_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>>>> {
>>>> struct gve_priv *priv = dev->data->dev_private;
>>>> int err;
>>>> @@ -403,20 +391,144 @@ gve_dev_mtu_set(struct rte_eth_dev *dev,
>>>> uint16_t mtu)
>>>> return 0;
>>>> }
>>>>
>>>> +static int
>>>> +gve_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat
>>>> *xstats, unsigned int n)
>>>> +{
>>>> +if (xstats) {
>>>
>>> To reduce indentation (and increase readability) you can prefer:
>>> ``
>>> if !xstats
>>> return count;
>>>
>>> <put rest of logic here>
>>> ``
>>>
>>>> +uint requested = n;
>>>
>>> uint? C#? Please prefer standard "unsigned int" type.
>>>
>>>> +uint64_t indx = 0;
>>>> +struct rte_eth_xstat *xstat = xstats;
>>>> +uint16_t i;
>>>> +
>>>> +for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>>> +struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>>>> +xstat->id = indx++;
>>>> +xstat->value = rxq->packets;
>>>> +if (--requested == 0)
>>>> +return n;
>>>
>>> And in this case, ethdev layer does the checks and return accordingly,
>>> so need to try to fill the stats as much as possible, can you please
>>> double check the ethdev API?
>>>
>>>> +xstat++;
>>>> +
>>>> +xstat->id = indx++;
>>>> +xstat->value = rxq->bytes;
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstat++;
>>>> +
>>>> +xstat->id = indx++;
>>>> +xstat->value = rxq->errors;
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstat++;
>>>> +
>>>> +xstat->id = indx++;
>>>> +xstat->value = rxq->no_mbufs;
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstat++;
>>>> +}
>>>> +
>>>> +for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>>> +struct gve_tx_queue *txq = dev->data->tx_queues[i];
>>>> +xstat->id = indx++;
>>>> +xstat->value = txq->packets;
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstat++;
>>>> +
>>>> +xstat->id = indx++;
>>>> +xstat->value = txq->bytes;
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstat++;
>>>> +
>>>> +xstat->id = indx++;
>>>> +xstat->value = txq->errors;
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstat++;
>>>> +}
>>>
>>>
>>> This approach is error prone and closed to extension; many
>>> implementations prefer to have an internal struct that links names and
>>> values, something like:
>>> struct name_offset {
>>>     char name[RTE_ETH_XSTATS_NAME_SIZE];
>>>     uint32_t offset;
>>> };
>>>
>>> 'name' holds the xstat name; for this patch it will only be the repeating
>>> part of the name, like 'packets', 'bytes', etc., and you need to construct
>>> the full name on the fly. That is why you may prefer to add a type field
>>> to the struct above to differentiate Rx and Tx and use it for name
>>> creation, up to you.
>>>
>>>
>>> 'offset' holds the offset of the corresponding value in a struct; for you
>>> that is "struct gve_rx_queue" & "struct gve_tx_queue". Since there are two
>>> different structs, it helps to create a helper macro that gets the offset
>>> from the correct struct.
>>>
>>> struct name_offset rx_name_offset[] = {
>>>     { "packets", offsetof(struct gve_rx_queue, packets) },
>>>     ....
>>> };
>>>
>>>
>>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>>     struct gve_rx_queue *rxq = dev->data->rx_queues[i];
>>>     for (j = 0; j < RTE_DIM(rx_name_offset); j++) {
>>>         addr = (char *)rxq + rx_name_offset[j].offset;
>>>         xstats[index++].value = *(uint64_t *)addr;
>>>     }
>>> }
>>> for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>>     struct gve_tx_queue *txq = dev->data->tx_queues[i];
>>>     for (j = 0; j < RTE_DIM(tx_name_offset); j++) {
>>>         addr = (char *)txq + tx_name_offset[j].offset;
>>>         xstats[index++].value = *(uint64_t *)addr;
>>>     }
>>> }
>>>
>>> There are many samples of this in other drivers.
>>>
>>> And since in this case the queue names change dynamically with the number
>>> of queues, instead of being a fixed set, both the names and the count have
>>> to be built at runtime.
>>>
>>>
>>>> +}
>>>> +
>>>> +return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
>>>
>>> When the 'name_offset' struct suggested above is used, you can use its
>>> size instead of the hardcoded 4 & 3 values.
>>>
>>> With the above sample, it becomes:
>>>
>>> return (dev->data->nb_rx_queues * RTE_DIM(rx_name_offset)) +
>>>        (dev->data->nb_tx_queues * RTE_DIM(tx_name_offset));
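
Putting the suggestion together, a hedged sketch of how such a table could
drive the value loop (the struct and array names below are illustrative, not
existing driver symbols, and it assumes the per-queue counters are uint64_t
fields as in this patch):

struct gve_xstats_name_offset {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct gve_xstats_name_offset rx_xstats_map[] = {
	{ "packets",  offsetof(struct gve_rx_queue, packets) },
	{ "bytes",    offsetof(struct gve_rx_queue, bytes) },
	{ "errors",   offsetof(struct gve_rx_queue, errors) },
	{ "no_mbufs", offsetof(struct gve_rx_queue, no_mbufs) },
};

static const struct gve_xstats_name_offset tx_xstats_map[] = {
	{ "packets", offsetof(struct gve_tx_queue, packets) },
	{ "bytes",   offsetof(struct gve_tx_queue, bytes) },
	{ "errors",  offsetof(struct gve_tx_queue, errors) },
};

/* Value fill loop inside xstats_get; the Tx loop is the same shape. */
count = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
	const char *rxq = (const char *)dev->data->rx_queues[i];

	for (j = 0; j < RTE_DIM(rx_xstats_map); j++) {
		xstats[count].id = count;
		xstats[count].value =
			*(const uint64_t *)(rxq + rx_xstats_map[j].offset);
		count++;
	}
}

Adding a new per-queue counter then only needs a new struct field and one
table entry; the loops and the reported count stay correct automatically.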
>>>
>>>
>>>> +}
>>>> +
>>>> +static int
>>>> +gve_xstats_reset(struct rte_eth_dev *dev)
>>>> +{
>>>> +return gve_stats_reset(dev);
>>>> +}
>>>> +
>>>> +static int
>>>> +gve_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names,
>>>> +unsigned int n)
>>>> +{
>>>> +if (xstats_names) {
>>>> +uint requested = n;
>>>> +struct rte_eth_xstat_name *xstats_name = xstats_names;
>>>> +uint16_t i;
>>>> +
>>>> +for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>>> +"rx_q%d_packets", i);
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstats_name++;
>>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>>> +"rx_q%d_bytes", i);
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstats_name++;
>>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>>> +"rx_q%d_errors", i);
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstats_name++;
>>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>>> +"rx_q%d_no_mbufs", i);
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstats_name++;
>>>> +}
>>>> +
>>>
>>> And again, following the above samples, this becomes:
>>>
>>> for (i = 0; i < dev->data->nb_rx_queues; i++) {
>>>     for (j = 0; j < RTE_DIM(rx_name_offset); j++) {
>>>         snprintf(xstats_names[index++].name, RTE_ETH_XSTATS_NAME_SIZE,
>>>             "rx_q%d_%s", i, rx_name_offset[j].name);
>>>     }
>>> }
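
With the same assumed tables as in the earlier sketch (rx_xstats_map /
tx_xstats_map, illustrative names only), the full names callback could look
roughly like this:

static int
gve_xstats_get_names_sketch(struct rte_eth_dev *dev,
			    struct rte_eth_xstat_name *names, unsigned int n)
{
	unsigned int count = dev->data->nb_rx_queues * RTE_DIM(rx_xstats_map) +
			     dev->data->nb_tx_queues * RTE_DIM(tx_xstats_map);
	unsigned int index = 0;
	unsigned int j;
	uint16_t i;

	if (names == NULL || n < count)
		return count;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		for (j = 0; j < RTE_DIM(rx_xstats_map); j++)
			snprintf(names[index++].name, RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%d_%s", i, rx_xstats_map[j].name);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		for (j = 0; j < RTE_DIM(tx_xstats_map); j++)
			snprintf(names[index++].name, RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%d_%s", i, tx_xstats_map[j].name);

	return count;
}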
>>>
>>>
>>>> +for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>>> +"tx_q%d_packets", i);
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstats_name++;
>>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>>> +"tx_q%d_bytes", i);
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstats_name++;
>>>> +snprintf(xstats_name->name, sizeof(xstats_name->name),
>>>> +"tx_q%d_errors", i);
>>>> +if (--requested == 0)
>>>> +return n;
>>>> +xstats_name++;
>>>> +}
>>>> +}
>>>> +
>>>> +return (dev->data->nb_rx_queues * 4) + (dev->data->nb_tx_queues * 3);
>>>> +}
>>>> +
>>>> static const struct eth_dev_ops gve_eth_dev_ops = {
>>>> .dev_configure = gve_dev_configure,
>>>> .dev_start = gve_dev_start,
>>>> .dev_stop = gve_dev_stop,
>>>> .dev_close = gve_dev_close,
>>>> -.dev_infos_get = gve_dev_info_get,
>>>> +.dev_infos_get = gve_dev_infos_get,
>>>> .rx_queue_setup = gve_rx_queue_setup,
>>>> .tx_queue_setup = gve_tx_queue_setup,
>>>> .rx_queue_release = gve_rx_queue_release,
>>>> .tx_queue_release = gve_tx_queue_release,
>>>> .link_update = gve_link_update,
>>>> -.stats_get = gve_dev_stats_get,
>>>> -.stats_reset = gve_dev_stats_reset,
>>>> -.mtu_set = gve_dev_mtu_set,
>>>> +.stats_get = gve_stats_get,
>>>> +.stats_reset = gve_stats_reset,
>>>> +.mtu_set = gve_mtu_set,
>>>> +.xstats_get = gve_xstats_get,
>>>> +.xstats_reset = gve_xstats_reset,
>>>> +.xstats_get_names = gve_xstats_get_names,
>>>> };
>>>>
>>>> static void
>>>> diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
>>>> index 66fbcf3930..7687977003 100644
>>>> --- a/drivers/net/gve/gve_rx.c
>>>> +++ b/drivers/net/gve/gve_rx.c
>>>> @@ -22,8 +22,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
>>>> if (diag < 0) {
>>>> for (i = 0; i < nb_alloc; i++) {
>>>> nmb = rte_pktmbuf_alloc(rxq->mpool);
>>>> -if (!nmb)
>>>> +if (!nmb) {
>>>> +rxq->no_mbufs++;
>>>
>>> Why is this needed? The original code is as follows:
>>>
>>> ``
>>> for (i = 0; i < nb_alloc; i++) {
>>>     nmb = rte_pktmbuf_alloc(rxq->mpool);
>>>     if (!nmb)
>>>         break;
>>>     rxq->sw_ring[idx + i] = nmb;
>>> }
>>> if (i != nb_alloc) {
>>>     rxq->no_mbufs += nb_alloc - i;
>>>     nb_alloc = i;
>>> }
>>> ``
>>>
>>> "if (i != nb_alloc)" block already takes care of 'rxq->no_mbufs' value,
>>> is an additional increment required in the for loop?
>>>
>>>
>>> And the change is unrelated to the patch anyway.
>>>
>>>> break;
>>>> +}
>>>> rxq->sw_ring[idx + i] = nmb;
>>>> }
>>>> if (i != nb_alloc) {
>>>> @@ -57,8 +59,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
>>>> if (diag < 0) {
>>>> for (i = 0; i < nb_alloc; i++) {
>>>> nmb = rte_pktmbuf_alloc(rxq->mpool);
>>>> -if (!nmb)
>>>> +if (!nmb) {
>>>> +rxq->no_mbufs++;
>>>> break;
>>>> +}
>>>> rxq->sw_ring[idx + i] = nmb;
>>>> }
>>>> nb_alloc = i;
>>
>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] net/gve: change offloading capability
2023-02-17 9:11 ` Guo, Junfeng
2023-02-17 9:15 ` Levend Sayar
@ 2023-02-20 15:43 ` Ferruh Yigit
1 sibling, 0 replies; 11+ messages in thread
From: Ferruh Yigit @ 2023-02-20 15:43 UTC (permalink / raw)
To: Guo, Junfeng, Levend Sayar; +Cc: dev
On 2/17/2023 9:11 AM, Guo, Junfeng wrote:
>
>
>> -----Original Message-----
>> From: Levend Sayar <levendsayar@gmail.com>
>> Sent: Friday, February 17, 2023 02:58
>> To: Guo, Junfeng <junfeng.guo@intel.com>
>> Cc: dev@dpdk.org; Levend Sayar <levendsayar@gmail.com>
>> Subject: [PATCH 1/2] net/gve: change offloading capability
>>
>> Google Virtual NIC is not doing IPv4 checksumming.
>> Removed that capability from PMD.
>>
>> Signed-off-by: Levend Sayar <levendsayar@gmail.com>
>> ---
>
> Acked-by: Junfeng Guo <junfeng.guo@intel.com>
>
> Acked-by: Rushil Gupta <rushilg@google.com>
>
Fixes: a46583cf43c8 ("net/gve: support Rx/Tx")
Cc: stable@dpdk.org
Added Rushil's name to .mailmap while merging.
Patch 2/2 is not merged and it can progress independently.
Applied to dpdk-next-net/main, thanks.
^ permalink raw reply [flat|nested] 11+ messages in thread
Thread overview: 11+ messages
2023-02-16 18:58 [PATCH 1/2] net/gve: change offloading capability Levend Sayar
2023-02-16 18:58 ` [PATCH 2/2] net/gve: add extended statistics Levend Sayar
2023-02-17 12:34 ` Ferruh Yigit
2023-02-19 0:26 ` Levend Sayar
2023-02-19 20:09 ` Ferruh Yigit
2023-02-19 20:37 ` Levend Sayar
2023-02-16 20:14 ` [PATCH 1/2] net/gve: change offloading capability Rushil Gupta
2023-02-17 9:07 ` Levend Sayar
2023-02-17 9:11 ` Guo, Junfeng
2023-02-17 9:15 ` Levend Sayar
2023-02-20 15:43 ` Ferruh Yigit