DPDK patches and discussions
 help / color / mirror / Atom feed
From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, thomas@monjalon.net, arybchenko@solarflare.com
Subject: [dpdk-dev] [RFC PATCH 1/2] ethdev: introduce internal rxq/txq stats API
Date: Thu, 14 Mar 2019 16:13:15 +0100	[thread overview]
Message-ID: <1552576396-19906-1-git-send-email-david.marchand@redhat.com> (raw)
Message-ID: <20190314151315.NfDHxafPSEGLgaRgJ7whWmHlSRyxZot_NHdSh09jpZc@z> (raw)
In-Reply-To: <CAJFAV8wAM-k7mvzwo6fe1P5x=fM5s2-qyHvayJhZ_y2E=Hy6Jg@mail.gmail.com>

Introduce a new API to retrieve per-queue statistics from the drivers.
The API objectives:
- easily add some common per-queue statistics and have them exposed
  through the user xstats API while the user stats API is left untouched
- remove the limitations on the per-queue statistics count (inherited
  from ixgbe) and avoid recurrent bugs on stats array overflow

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/librte_ethdev/rte_ethdev.c        | 191 ++++++++++++++++++++++++++++------
 lib/librte_ethdev/rte_ethdev_core.h   |  13 +++
 lib/librte_ethdev/rte_ethdev_driver.h |  18 ++++
 3 files changed, 192 insertions(+), 30 deletions(-)

diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 85c1794..058fbd1 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -88,21 +88,30 @@ struct rte_eth_xstats_name_off {
 
 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
 
-static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
+static const struct rte_eth_xstats_name_off legacy_rxq_stats_map[] = {
 	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
 	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
 	{"errors", offsetof(struct rte_eth_stats, q_errors)},
 };
+#define RTE_NB_LEGACY_RXQ_STATS RTE_DIM(legacy_rxq_stats_map)
+static const struct rte_eth_xstats_name_off rxq_stats_map[] = {
+	{"packets", offsetof(struct pmd_eth_rxq_stats, packets)},
+	{"bytes", offsetof(struct pmd_eth_rxq_stats, bytes)},
+	{"errors", offsetof(struct pmd_eth_rxq_stats, errors)},
+};
+#define RTE_NB_RXQ_STATS RTE_DIM(rxq_stats_map)
 
-#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /	\
-		sizeof(rte_rxq_stats_strings[0]))
-
-static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
+static const struct rte_eth_xstats_name_off legacy_txq_stats_map[] = {
 	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
 	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
 };
-#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /	\
-		sizeof(rte_txq_stats_strings[0]))
+#define RTE_NB_LEGACY_TXQ_STATS RTE_DIM(legacy_txq_stats_map)
+static const struct rte_eth_xstats_name_off txq_stats_map[] = {
+	{"packets", offsetof(struct pmd_eth_txq_stats, packets)},
+	{"bytes", offsetof(struct pmd_eth_txq_stats, bytes)},
+	{"errors", offsetof(struct pmd_eth_txq_stats, errors)},
+};
+#define RTE_NB_TXQ_STATS RTE_DIM(txq_stats_map)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
 	{ DEV_RX_OFFLOAD_##_name, #_name }
@@ -1937,6 +1946,10 @@ struct rte_eth_dev *
 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
 {
 	struct rte_eth_dev *dev;
+	unsigned int nb_rxqs;
+	unsigned int nb_txqs;
+	unsigned int qid;
+	int ret;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1945,7 +1958,44 @@ struct rte_eth_dev *
 
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
-	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
+	ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
+	if (ret)
+		goto out;
+
+	if (!dev->dev_ops->rxq_stats_get)
+		goto skip_rxq;
+	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
+			  RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	for (qid = 0; qid < nb_rxqs; qid++) {
+		struct pmd_eth_rxq_stats rxq_stats;
+
+		memset(&rxq_stats, 0, sizeof(rxq_stats));
+		if (dev->dev_ops->rxq_stats_get(dev, qid, &rxq_stats))
+			continue;
+
+		stats->q_ipackets[qid] = rxq_stats.packets;
+		stats->q_ibytes[qid] = rxq_stats.bytes;
+		stats->q_errors[qid] = rxq_stats.errors;
+	}
+
+skip_rxq:
+	if (!dev->dev_ops->txq_stats_get)
+		goto out;
+	nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
+			  RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	for (qid = 0; qid < nb_txqs; qid++) {
+		struct pmd_eth_txq_stats txq_stats;
+
+		memset(&txq_stats, 0, sizeof(txq_stats));
+		if (dev->dev_ops->txq_stats_get(dev, qid, &txq_stats))
+			continue;
+
+		stats->q_opackets[qid] = txq_stats.packets;
+		stats->q_obytes[qid] = txq_stats.bytes;
+	}
+
+out:
+	return ret;
 }
 
 int
@@ -1969,12 +2019,24 @@ struct rte_eth_dev *
 	uint16_t nb_rxqs, nb_txqs;
 	int count;
 
-	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-
 	count = RTE_NB_STATS;
-	count += nb_rxqs * RTE_NB_RXQ_STATS;
-	count += nb_txqs * RTE_NB_TXQ_STATS;
+	if (dev->dev_ops->rxq_stats_get) {
+		nb_rxqs = dev->data->nb_rx_queues;
+		count += nb_rxqs * RTE_NB_RXQ_STATS;
+	} else {
+		nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
+				  RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		count += nb_rxqs * RTE_NB_LEGACY_RXQ_STATS;
+	}
+
+	if (dev->dev_ops->txq_stats_get) {
+		nb_txqs = dev->data->nb_tx_queues;
+		count += nb_txqs * RTE_NB_TXQ_STATS;
+	} else {
+		nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
+				  RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		count += nb_txqs * RTE_NB_LEGACY_TXQ_STATS;
+	}
 
 	return count;
 }
@@ -2065,27 +2127,59 @@ struct rte_eth_dev *
 			"%s", rte_stats_strings[idx].name);
 		cnt_used_entries++;
 	}
+
+	if (dev->dev_ops->rxq_stats_get) {
+		num_q = dev->data->nb_rx_queues;
+		for (id_queue = 0; id_queue < num_q; id_queue++) {
+			for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+				snprintf(xstats_names[cnt_used_entries].name,
+					sizeof(xstats_names[0].name),
+					"rx_q%u%s",
+					id_queue, rxq_stats_map[idx].name);
+				cnt_used_entries++;
+			}
+		}
+		goto skip_legacy_rxq;
+	}
+
 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	for (id_queue = 0; id_queue < num_q; id_queue++) {
-		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+		for (idx = 0; idx < RTE_NB_LEGACY_RXQ_STATS; idx++) {
 			snprintf(xstats_names[cnt_used_entries].name,
 				sizeof(xstats_names[0].name),
 				"rx_q%u%s",
-				id_queue, rte_rxq_stats_strings[idx].name);
+				id_queue, legacy_rxq_stats_map[idx].name);
 			cnt_used_entries++;
 		}
+	}
 
+skip_legacy_rxq:
+	if (dev->dev_ops->txq_stats_get) {
+		num_q = dev->data->nb_tx_queues;
+		for (id_queue = 0; id_queue < num_q; id_queue++) {
+			for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+				snprintf(xstats_names[cnt_used_entries].name,
+					sizeof(xstats_names[0].name),
+					"tx_q%u%s",
+					id_queue, txq_stats_map[idx].name);
+				cnt_used_entries++;
+			}
+		}
+		goto skip_legacy_txq;
 	}
+
 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	for (id_queue = 0; id_queue < num_q; id_queue++) {
-		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+		for (idx = 0; idx < RTE_NB_LEGACY_TXQ_STATS; idx++) {
 			snprintf(xstats_names[cnt_used_entries].name,
 				sizeof(xstats_names[0].name),
 				"tx_q%u%s",
-				id_queue, rte_txq_stats_strings[idx].name);
+				id_queue, legacy_txq_stats_map[idx].name);
 			cnt_used_entries++;
 		}
 	}
+
+skip_legacy_txq:
 	return cnt_used_entries;
 }
 
@@ -2252,9 +2346,6 @@ struct rte_eth_dev *
 
 	dev = &rte_eth_devices[port_id];
 
-	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-
 	/* global stats */
 	for (i = 0; i < RTE_NB_STATS; i++) {
 		stats_ptr = RTE_PTR_ADD(&eth_stats,
@@ -2264,26 +2355,71 @@ struct rte_eth_dev *
 	}
 
 	/* per-rxq stats */
+	if (dev->dev_ops->rxq_stats_get) {
+		nb_rxqs = dev->data->nb_rx_queues;
+		for (q = 0; q < nb_rxqs; q++) {
+			struct pmd_eth_rxq_stats rxq_stats;
+
+			memset(&rxq_stats, 0, sizeof(rxq_stats));
+			if (dev->dev_ops->rxq_stats_get(dev, q, &rxq_stats)) {
+				count += RTE_NB_RXQ_STATS;
+				continue;
+			}
+			for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+				stats_ptr = RTE_PTR_ADD(&rxq_stats,
+						rxq_stats_map[i].offset);
+				val = *stats_ptr;
+				xstats[count++].value = val;
+			}
+		}
+		goto skip_legacy_rxq;
+	}
+
+	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	for (q = 0; q < nb_rxqs; q++) {
-		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+		for (i = 0; i < RTE_NB_LEGACY_RXQ_STATS; i++) {
 			stats_ptr = RTE_PTR_ADD(&eth_stats,
-					rte_rxq_stats_strings[i].offset +
+					legacy_rxq_stats_map[i].offset +
 					q * sizeof(uint64_t));
 			val = *stats_ptr;
 			xstats[count++].value = val;
 		}
 	}
 
+skip_legacy_rxq:
 	/* per-txq stats */
+	if (dev->dev_ops->txq_stats_get) {
+		nb_txqs = dev->data->nb_tx_queues;
+		for (q = 0; q < nb_txqs; q++) {
+			struct pmd_eth_txq_stats txq_stats;
+
+			memset(&txq_stats, 0, sizeof(txq_stats));
+			if (dev->dev_ops->txq_stats_get(dev, q, &txq_stats)) {
+				count += RTE_NB_TXQ_STATS;
+				continue;
+			}
+			for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+				stats_ptr = RTE_PTR_ADD(&txq_stats,
+						txq_stats_map[i].offset);
+				val = *stats_ptr;
+				xstats[count++].value = val;
+			}
+		}
+		goto skip_legacy_txq;
+	}
+
+	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	for (q = 0; q < nb_txqs; q++) {
-		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+		for (i = 0; i < RTE_NB_LEGACY_TXQ_STATS; i++) {
 			stats_ptr = RTE_PTR_ADD(&eth_stats,
-					rte_txq_stats_strings[i].offset +
+					legacy_txq_stats_map[i].offset +
 					q * sizeof(uint64_t));
 			val = *stats_ptr;
 			xstats[count++].value = val;
 		}
 	}
+
+skip_legacy_txq:
 	return count;
 }
 
@@ -2387,19 +2523,14 @@ struct rte_eth_dev *
 	struct rte_eth_dev *dev;
 	unsigned int count = 0, i;
 	signed int xcount = 0;
-	uint16_t nb_rxqs, nb_txqs;
 	int ret;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
 	dev = &rte_eth_devices[port_id];
 
-	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-
 	/* Return generic statistics */
-	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
-		(nb_txqs * RTE_NB_TXQ_STATS);
+	count = get_xstats_basic_count(dev);
 
 	/* implemented by the driver */
 	if (dev->dev_ops->xstats_get != NULL) {
diff --git a/lib/librte_ethdev/rte_ethdev_core.h b/lib/librte_ethdev/rte_ethdev_core.h
index 8f03f83..63375fe 100644
--- a/lib/librte_ethdev/rte_ethdev_core.h
+++ b/lib/librte_ethdev/rte_ethdev_core.h
@@ -97,6 +97,16 @@ typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev,
 	unsigned int size);
 /**< @internal Get names of extended stats of an Ethernet device. */
 
+struct pmd_eth_rxq_stats;
+typedef int (*eth_rxq_stats_get)(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+	struct pmd_eth_rxq_stats *rx_queue_stats);
+/**< @internal Get statistics for an rx queue of an Ethernet device. */
+
+struct pmd_eth_txq_stats;
+typedef int (*eth_txq_stats_get)(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+	struct pmd_eth_txq_stats *tx_queue_stats);
+/**< @internal Get statistics for a tx queue of an Ethernet device. */
+
 typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
 					     uint16_t queue_id,
 					     uint8_t stat_idx,
@@ -501,6 +511,9 @@ struct eth_dev_ops {
 	eth_xstats_get_names_by_id_t xstats_get_names_by_id;
 	/**< Get name of extended device statistics by ID. */
 
+	eth_rxq_stats_get rxq_stats_get; /**< Stats per rxq */
+	eth_txq_stats_get txq_stats_get; /**< Stats per txq */
+
 	eth_tm_ops_get_t tm_ops_get;
 	/**< Get Traffic Management (TM) operations. */
 
diff --git a/lib/librte_ethdev/rte_ethdev_driver.h b/lib/librte_ethdev/rte_ethdev_driver.h
index c2ac263..33a4b22 100644
--- a/lib/librte_ethdev/rte_ethdev_driver.h
+++ b/lib/librte_ethdev/rte_ethdev_driver.h
@@ -331,6 +331,24 @@ typedef int (*ethdev_bus_specific_init)(struct rte_eth_dev *ethdev,
 int __rte_experimental
 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, ethdev_uninit_t ethdev_uninit);
 
+/**
+ * @internal
+ *
+ * Internal structures used by PMDs to provide the per rx/tx queue stats to
+ * the ethdev layer.
+ */
+struct pmd_eth_rxq_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+};
+
+struct pmd_eth_txq_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+};
+
 #ifdef __cplusplus
 }
 #endif
-- 
1.8.3.1


  parent reply	other threads:[~2019-03-14 15:13 UTC|newest]

Thread overview: 50+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-03-04 11:18 [dpdk-dev] [PATCH 00/12] rxq q_errors[] statistics fixes David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 01/12] net/af_packet: fix incorrect rxq errors stat David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 02/12] net/avp: " David Marchand
2019-03-04 12:18   ` Legacy, Allain
2019-03-04 11:18 ` [dpdk-dev] [PATCH 03/12] net/bnxt: " David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 04/12] net/cxgbe: " David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 05/12] net/kni: " David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 06/12] net/mlx4: " David Marchand
2019-03-05  8:19   ` Shahaf Shuler
2019-03-04 11:18 ` [dpdk-dev] [PATCH 07/12] net/mlx5: " David Marchand
2019-03-05  8:18   ` Shahaf Shuler
2019-03-04 11:18 ` [dpdk-dev] [PATCH 08/12] net/null: " David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 09/12] net/pcap: " David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 10/12] net/ring: " David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 11/12] net/szedata2: " David Marchand
2019-03-04 11:18 ` [dpdk-dev] [PATCH 12/12] net/tap: " David Marchand
2019-03-04 13:58   ` Wiles, Keith
2019-03-11 17:22 ` [dpdk-dev] [PATCH 00/12] rxq q_errors[] statistics fixes Ferruh Yigit
2019-03-11 18:09   ` David Marchand
2019-03-14 15:12     ` David Marchand
2019-03-14 15:12       ` David Marchand
2019-03-14 15:13       ` David Marchand [this message]
2019-03-14 15:13         ` [dpdk-dev] [RFC PATCH 1/2] ethdev: introduce internal rxq/txq stats API David Marchand
2019-03-14 15:13         ` [dpdk-dev] [RFC PATCH 2/2] net/af_packet: convert to new " David Marchand
2019-03-14 15:13           ` David Marchand
2019-03-15 13:30         ` [dpdk-dev] [RFC PATCH 1/2] ethdev: introduce internal " David Marchand
2019-03-15 13:30           ` David Marchand
2019-03-19 17:18         ` Ferruh Yigit
2019-03-19 17:18           ` Ferruh Yigit
2019-03-19 17:54           ` Stephen Hemminger
2019-03-19 17:54             ` Stephen Hemminger
2019-04-12 13:18             ` Thomas Monjalon
2019-04-12 13:18               ` Thomas Monjalon
2019-03-26  9:29           ` David Marchand
2019-03-26  9:29             ` David Marchand
2019-04-12 13:29             ` Thomas Monjalon
2019-04-12 13:29               ` Thomas Monjalon
2019-04-12 14:32               ` David Marchand
2019-04-12 14:32                 ` David Marchand
2019-04-12 16:05                 ` Stephen Hemminger
2019-04-12 16:05                   ` Stephen Hemminger
2019-04-12 15:07   ` [dpdk-dev] [PATCH 00/12] rxq q_errors[] statistics fixes Thomas Monjalon
2019-04-12 15:07     ` Thomas Monjalon
2019-04-12 15:38     ` Ferruh Yigit
2019-04-12 15:38       ` Ferruh Yigit
2019-04-12 15:45       ` Thomas Monjalon
2019-04-12 15:45         ` Thomas Monjalon
2019-04-12 15:57         ` Ferruh Yigit
2019-04-12 15:57           ` Ferruh Yigit
2019-05-28 21:38 ` Yigit, Ferruh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1552576396-19906-1-git-send-email-david.marchand@redhat.com \
    --to=david.marchand@redhat.com \
    --cc=arybchenko@solarflare.com \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@intel.com \
    --cc=thomas@monjalon.net \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).