From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, jeroendb@google.com, rushilg@google.com, jrkim@google.com,
 Junfeng Guo <junfeng.guo@intel.com>, Xiaoyun Li <xiaoyun.li@intel.com>
Subject: [PATCH] net/gve: add support for basic stats
Date: Thu, 24 Nov 2022 15:33:35 +0800
Message-Id: <20221124073335.3985214-1-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Add support for the stats_get and stats_reset dev_ops.

Per-queue stats are scheduled to move into xstats [1], but the basic
per-port stats items are still required. So only the basic stats are
implemented here; per-queue stats will follow via xstats in a coming
release.

[1] https://elixir.bootlin.com/dpdk/v22.07/source/doc/guides/rel_notes/deprecation.rst#L118

Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
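Note: below is a minimal usage sketch (not part of the patch) showing
how an application would exercise the new dev_ops through the generic
ethdev API. The helper name gve_show_basic_stats is hypothetical;
rte_eth_stats_get()/rte_eth_stats_reset() are the standard ethdev
entry points that dispatch to the new callbacks.

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Hypothetical helper: read and clear the basic per-port stats
 * added by this patch. port_id is assumed to be a started gve port.
 */
static void
gve_show_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	/* Dispatches to gve_dev_stats_get() via eth_dev_ops. */
	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("port %u: ipackets=%" PRIu64 " opackets=%" PRIu64
		       " ierrors=%" PRIu64 " rx_nombuf=%" PRIu64 "\n",
		       port_id, stats.ipackets, stats.opackets,
		       stats.ierrors, stats.rx_nombuf);

	/* Dispatches to gve_dev_stats_reset() via eth_dev_ops. */
	rte_eth_stats_reset(port_id);
}
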
 doc/guides/nics/features/gve.ini |  1 +
 drivers/net/gve/gve_ethdev.c     | 60 ++++++++++++++++++++++++++++++++
 drivers/net/gve/gve_ethdev.h     | 11 ++++++
 drivers/net/gve/gve_rx.c         | 15 ++++++--
 drivers/net/gve/gve_tx.c         | 13 +++++++
 5 files changed, 98 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/features/gve.ini b/doc/guides/nics/features/gve.ini
index cdc46b08a3..838edd456a 100644
--- a/doc/guides/nics/features/gve.ini
+++ b/doc/guides/nics/features/gve.ini
@@ -10,6 +10,7 @@ MTU update           = Y
 TSO                  = Y
 RSS hash             = Y
 L4 checksum offload  = Y
+Basic stats          = Y
 Linux                = Y
 x86-32               = Y
 x86-64               = Y
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 97781f0ed3..06d1b796c8 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -319,6 +319,64 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	return 0;
 }
 
+static int
+gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct gve_tx_queue *txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		stats->opackets += txq->packets;
+		stats->obytes += txq->bytes;
+		stats->oerrors += txq->errors;
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		stats->ipackets += rxq->packets;
+		stats->ibytes += rxq->bytes;
+		stats->ierrors += rxq->errors;
+		stats->rx_nombuf += rxq->no_mbufs;
+	}
+
+	return 0;
+}
+
+static int
+gve_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct gve_tx_queue *txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+	txq->packets = 0;
+		txq->bytes = 0;
+		txq->errors = 0;
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct gve_rx_queue *rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+	rxq->packets = 0;
+		rxq->bytes = 0;
+		rxq->errors = 0;
+		rxq->no_mbufs = 0;
+	}
+
+	return 0;
+}
+
 static int
 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -357,6 +415,8 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
 	.rx_queue_release     = gve_rx_queue_release,
 	.tx_queue_release     = gve_tx_queue_release,
 	.link_update          = gve_link_update,
+	.stats_get            = gve_dev_stats_get,
+	.stats_reset          = gve_dev_stats_reset,
 	.mtu_set              = gve_dev_mtu_set,
 };
 
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 235e55899e..64e571bcae 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -92,6 +92,11 @@ struct gve_tx_queue {
 	struct gve_queue_page_list *qpl;
 	struct gve_tx_iovec *iov_ring;
 
+	/* stats items */
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+
 	uint16_t port_id;
 	uint16_t queue_id;
 
@@ -130,6 +135,12 @@ struct gve_rx_queue {
 	/* only valid for GQI_QPL queue format */
 	struct gve_queue_page_list *qpl;
 
+	/* stats items */
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+	uint64_t no_mbufs;
+
 	struct gve_priv *hw;
 	const struct rte_memzone *qres_mz;
 	struct gve_queue_resources *qres;
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 518c9d109c..66fbcf3930 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -26,8 +26,10 @@ gve_rx_refill(struct gve_rx_queue *rxq)
 					break;
 				rxq->sw_ring[idx + i] = nmb;
 			}
-			if (i != nb_alloc)
+			if (i != nb_alloc) {
+				rxq->no_mbufs += nb_alloc - i;
 				nb_alloc = i;
+			}
 		}
 		rxq->nb_avail -= nb_alloc;
 		next_avail += nb_alloc;
@@ -88,6 +90,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint16_t rx_id = rxq->rx_tail;
 	struct rte_mbuf *rxe;
 	uint16_t nb_rx, len;
+	uint64_t bytes = 0;
 	uint64_t addr;
 	uint16_t i;
 
@@ -99,8 +102,10 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
 			break;
 
-		if (rxd->flags_seq & GVE_RXF_ERR)
+		if (rxd->flags_seq & GVE_RXF_ERR) {
+			rxq->errors++;
 			continue;
+		}
 
 		len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
 		rxe = rxq->sw_ring[rx_id];
@@ -135,6 +140,7 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rx_id = 0;
 
 		rx_pkts[nb_rx] = rxe;
+		bytes += len;
 		nb_rx++;
 	}
 
@@ -144,6 +150,11 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	if (rxq->nb_avail > rxq->free_thresh)
 		gve_rx_refill(rxq);
 
+	if (nb_rx) {
+		rxq->packets += nb_rx;
+		rxq->bytes += bytes;
+	}
+
 	return nb_rx;
 }
 
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index bf4e8fea2c..9b41c59358 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -260,6 +260,7 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct rte_mbuf *tx_pkt, *first;
 	uint16_t sw_id = txq->sw_tail;
 	uint16_t nb_used, i;
+	uint64_t bytes = 0;
 	uint16_t nb_tx = 0;
 	uint32_t hlen;
 
@@ -355,6 +356,8 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		txq->nb_free -= nb_used;
 		txq->sw_nb_free -= first->nb_segs;
 		tx_tail += nb_used;
+
+		bytes += first->pkt_len;
 	}
 
 end_of_tx:
@@ -362,6 +365,10 @@ gve_tx_burst_qpl(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
 		txq->tx_tail = tx_tail;
 		txq->sw_tail = sw_id;
+
+		txq->packets += nb_tx;
+		txq->bytes += bytes;
+		txq->errors += nb_pkts - nb_tx;
 	}
 
 	return nb_tx;
@@ -380,6 +387,7 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct rte_mbuf *tx_pkt, *first;
 	uint16_t nb_used, hlen, i;
 	uint64_t ol_flags, addr;
+	uint64_t bytes = 0;
 	uint16_t nb_tx = 0;
 
 	txr = txq->tx_desc_ring;
@@ -438,12 +446,17 @@ gve_tx_burst_ra(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		txq->nb_free -= nb_used;
 		tx_tail += nb_used;
+
+		bytes += first->pkt_len;
 	}
 
 end_of_tx:
 	if (nb_tx) {
 		rte_write32(rte_cpu_to_be_32(tx_tail), txq->qtx_tail);
 		txq->tx_tail = tx_tail;
+
+		txq->packets += nb_tx;
+		txq->bytes += bytes;
 	}
 
 	return nb_tx;
-- 
2.34.1