DPDK patches and discussions
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v4 34/58] net/txgbe: add device xstats get
Date: Mon, 19 Oct 2020 16:53:51 +0800	[thread overview]
Message-ID: <20201019085415.82207-35-jiawenwu@trustnetic.com> (raw)
In-Reply-To: <20201019085415.82207-1-jiawenwu@trustnetic.com>

Add support for getting device extended statistics (xstats) by reading hardware registers.
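
For illustration only (not part of the applied diff): a minimal sketch of how an
application could read the counters added here through the generic ethdev xstats
API. The helper name dump_port_xstats() is hypothetical, the snippet assumes
port_id refers to a started txgbe port, and error handling is abbreviated.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Query the number of xstats, fetch names and values, then print them. */
static void
dump_port_xstats(uint16_t port_id)
{
	/* A NULL array asks the PMD how many entries it would return. */
	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (nb <= 0)
		return;

	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
	struct rte_eth_xstat *stats = calloc(nb, sizeof(*stats));

	if (names != NULL && stats != NULL &&
	    rte_eth_xstats_get_names(port_id, names, nb) == nb &&
	    rte_eth_xstats_get(port_id, stats, nb) == nb) {
		/* With this patch the names look like "[hw]rx_packets",
		 * "[p0]rx_up_packets" or "[q0]rx_qp_packets".
		 */
		for (int i = 0; i < nb; i++)
			printf("%s: %" PRIu64 "\n",
			       names[stats[i].id].name, stats[i].value);
	}

	free(names);
	free(stats);
}

The same counters can also be inspected interactively with testpmd's
"show port xstats <port_id>" command.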

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/txgbe.ini |   1 +
 drivers/net/txgbe/txgbe_ethdev.c   | 378 +++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_ethdev.h   |   6 +
 3 files changed, 385 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 32df33dfc..e18632205 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -24,6 +24,7 @@ Inner L3 checksum    = P
 Inner L4 checksum    = P
 Packet type parsing  = Y
 Basic stats          = Y
+Extended stats       = Y
 Multiprocess aware   = Y
 Linux UIO            = Y
 Linux VFIO           = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index f299fae29..9ead046e1 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -65,6 +65,144 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
 
 static const struct eth_dev_ops txgbe_eth_dev_ops;
 
+#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
+#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
+	/* MNG RxTx */
+	HW_XSTAT(mng_bmc2host_packets),
+	HW_XSTAT(mng_host2bmc_packets),
+	/* Basic RxTx */
+	HW_XSTAT(rx_packets),
+	HW_XSTAT(tx_packets),
+	HW_XSTAT(rx_bytes),
+	HW_XSTAT(tx_bytes),
+	HW_XSTAT(rx_total_bytes),
+	HW_XSTAT(rx_total_packets),
+	HW_XSTAT(tx_total_packets),
+	HW_XSTAT(rx_total_missed_packets),
+	HW_XSTAT(rx_broadcast_packets),
+	HW_XSTAT(rx_multicast_packets),
+	HW_XSTAT(rx_management_packets),
+	HW_XSTAT(tx_management_packets),
+	HW_XSTAT(rx_management_dropped),
+
+	/* Basic Error */
+	HW_XSTAT(rx_crc_errors),
+	HW_XSTAT(rx_illegal_byte_errors),
+	HW_XSTAT(rx_error_bytes),
+	HW_XSTAT(rx_mac_short_packet_dropped),
+	HW_XSTAT(rx_length_errors),
+	HW_XSTAT(rx_undersize_errors),
+	HW_XSTAT(rx_fragment_errors),
+	HW_XSTAT(rx_oversize_errors),
+	HW_XSTAT(rx_jabber_errors),
+	HW_XSTAT(rx_l3_l4_xsum_error),
+	HW_XSTAT(mac_local_errors),
+	HW_XSTAT(mac_remote_errors),
+
+	/* Flow Director */
+	HW_XSTAT(flow_director_added_filters),
+	HW_XSTAT(flow_director_removed_filters),
+	HW_XSTAT(flow_director_filter_add_errors),
+	HW_XSTAT(flow_director_filter_remove_errors),
+	HW_XSTAT(flow_director_matched_filters),
+	HW_XSTAT(flow_director_missed_filters),
+
+	/* FCoE */
+	HW_XSTAT(rx_fcoe_crc_errors),
+	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
+	HW_XSTAT(rx_fcoe_dropped),
+	HW_XSTAT(rx_fcoe_packets),
+	HW_XSTAT(tx_fcoe_packets),
+	HW_XSTAT(rx_fcoe_bytes),
+	HW_XSTAT(tx_fcoe_bytes),
+	HW_XSTAT(rx_fcoe_no_ddp),
+	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
+
+	/* MACSEC */
+	HW_XSTAT(tx_macsec_pkts_untagged),
+	HW_XSTAT(tx_macsec_pkts_encrypted),
+	HW_XSTAT(tx_macsec_pkts_protected),
+	HW_XSTAT(tx_macsec_octets_encrypted),
+	HW_XSTAT(tx_macsec_octets_protected),
+	HW_XSTAT(rx_macsec_pkts_untagged),
+	HW_XSTAT(rx_macsec_pkts_badtag),
+	HW_XSTAT(rx_macsec_pkts_nosci),
+	HW_XSTAT(rx_macsec_pkts_unknownsci),
+	HW_XSTAT(rx_macsec_octets_decrypted),
+	HW_XSTAT(rx_macsec_octets_validated),
+	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
+	HW_XSTAT(rx_macsec_sc_pkts_delayed),
+	HW_XSTAT(rx_macsec_sc_pkts_late),
+	HW_XSTAT(rx_macsec_sa_pkts_ok),
+	HW_XSTAT(rx_macsec_sa_pkts_invalid),
+	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
+	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
+	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
+
+	/* MAC RxTx */
+	HW_XSTAT(rx_size_64_packets),
+	HW_XSTAT(rx_size_65_to_127_packets),
+	HW_XSTAT(rx_size_128_to_255_packets),
+	HW_XSTAT(rx_size_256_to_511_packets),
+	HW_XSTAT(rx_size_512_to_1023_packets),
+	HW_XSTAT(rx_size_1024_to_max_packets),
+	HW_XSTAT(tx_size_64_packets),
+	HW_XSTAT(tx_size_65_to_127_packets),
+	HW_XSTAT(tx_size_128_to_255_packets),
+	HW_XSTAT(tx_size_256_to_511_packets),
+	HW_XSTAT(tx_size_512_to_1023_packets),
+	HW_XSTAT(tx_size_1024_to_max_packets),
+
+	/* Flow Control */
+	HW_XSTAT(tx_xon_packets),
+	HW_XSTAT(rx_xon_packets),
+	HW_XSTAT(tx_xoff_packets),
+	HW_XSTAT(rx_xoff_packets),
+
+	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
+	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
+	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
+	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
+};
+
+#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
+			   sizeof(rte_txgbe_stats_strings[0]))
+
+/* Per-priority statistics */
+#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
+	UP_XSTAT(rx_up_packets),
+	UP_XSTAT(tx_up_packets),
+	UP_XSTAT(rx_up_bytes),
+	UP_XSTAT(tx_up_bytes),
+	UP_XSTAT(rx_up_drop_packets),
+
+	UP_XSTAT(tx_up_xon_packets),
+	UP_XSTAT(rx_up_xon_packets),
+	UP_XSTAT(tx_up_xoff_packets),
+	UP_XSTAT(rx_up_xoff_packets),
+	UP_XSTAT(rx_up_dropped),
+	UP_XSTAT(rx_up_mbuf_alloc_errors),
+	UP_XSTAT(tx_up_xon2off_packets),
+};
+
+#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
+			   sizeof(rte_txgbe_up_strings[0]))
+
+/* Per-queue statistics */
+#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
+	QP_XSTAT(rx_qp_packets),
+	QP_XSTAT(tx_qp_packets),
+	QP_XSTAT(rx_qp_bytes),
+	QP_XSTAT(tx_qp_bytes),
+	QP_XSTAT(rx_qp_mc_packets),
+};
+
+#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
+			   sizeof(rte_txgbe_qp_strings[0]))
+
 static inline int
 txgbe_is_sfp(struct txgbe_hw *hw)
 {
@@ -1211,6 +1349,241 @@ txgbe_dev_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+txgbe_xstats_calc_num(struct rte_eth_dev *dev)
+{
+	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+	return TXGBE_NB_HW_STATS +
+	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
+	       TXGBE_NB_QP_STATS * nb_queues;
+}
+
+static inline int
+txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
+{
+	int nb, st;
+
+	/* Extended stats from txgbe_hw_stats */
+	if (id < TXGBE_NB_HW_STATS) {
+		snprintf(name, size, "[hw]%s",
+			rte_txgbe_stats_strings[id].name);
+		return 0;
+	}
+	id -= TXGBE_NB_HW_STATS;
+
+	/* Priority Stats */
+	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+		nb = id / TXGBE_NB_UP_STATS;
+		st = id % TXGBE_NB_UP_STATS;
+		snprintf(name, size, "[p%u]%s", nb,
+			rte_txgbe_up_strings[st].name);
+		return 0;
+	}
+	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+	/* Queue Stats */
+	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+		nb = id / TXGBE_NB_QP_STATS;
+		st = id % TXGBE_NB_QP_STATS;
+		snprintf(name, size, "[q%u]%s", nb,
+			rte_txgbe_qp_strings[st].name);
+		return 0;
+	}
+	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+	return -(int)(id + 1);
+}
+
+static inline int
+txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
+{
+	int nb, st;
+
+	/* Extended stats from txgbe_hw_stats */
+	if (id < TXGBE_NB_HW_STATS) {
+		*offset = rte_txgbe_stats_strings[id].offset;
+		return 0;
+	}
+	id -= TXGBE_NB_HW_STATS;
+
+	/* Priority Stats */
+	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+		nb = id / TXGBE_NB_UP_STATS;
+		st = id % TXGBE_NB_UP_STATS;
+		*offset = rte_txgbe_up_strings[st].offset +
+			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
+		return 0;
+	}
+	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+	/* Queue Stats */
+	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+		nb = id / TXGBE_NB_QP_STATS;
+		st = id % TXGBE_NB_QP_STATS;
+		*offset = rte_txgbe_qp_strings[st].offset +
+			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
+		return 0;
+	}
+	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+	return -(int)(id + 1);
+}
+
+static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
+{
+	unsigned int i, count;
+
+	count = txgbe_xstats_calc_num(dev);
+	if (xstats_names == NULL)
+		return count;
+
+	/* Note: limit >= cnt_stats checked upstream
+	 * in rte_eth_xstats_names()
+	 */
+	limit = min(limit, count);
+
+	/* Extended stats from txgbe_hw_stats */
+	for (i = 0; i < limit; i++) {
+		if (txgbe_get_name_by_id(i, xstats_names[i].name,
+			sizeof(xstats_names[i].name))) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			break;
+		}
+	}
+
+	return i;
+}
+
+static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names,
+	const uint64_t *ids,
+	unsigned int limit)
+{
+	unsigned int i;
+
+	if (ids == NULL)
+		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
+
+	for (i = 0; i < limit; i++) {
+		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
+				sizeof(xstats_names[i].name))) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			return -1;
+		}
+	}
+
+	return i;
+}
+
+static int
+txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+					 unsigned int limit)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+	unsigned int i, count;
+
+	txgbe_read_stats_registers(hw, hw_stats);
+
+	/* If this is a reset xstats is NULL, and we have cleared the
+	 * registers by reading them.
+	 */
+	count = txgbe_xstats_calc_num(dev);
+	if (xstats == NULL)
+		return count;
+
+	limit = min(limit, txgbe_xstats_calc_num(dev));
+
+	/* Extended stats from txgbe_hw_stats */
+	for (i = 0; i < limit; i++) {
+		uint32_t offset = 0;
+
+		if (txgbe_get_offset_by_id(i, &offset)) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			break;
+		}
+		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
+		xstats[i].id = i;
+	}
+
+	return i;
+}
+
+static int
+txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
+					 unsigned int limit)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+	unsigned int i, count;
+
+	txgbe_read_stats_registers(hw, hw_stats);
+
+	/* If this is a reset xstats is NULL, and we have cleared the
+	 * registers by reading them.
+	 */
+	count = txgbe_xstats_calc_num(dev);
+	if (values == NULL)
+		return count;
+
+	limit = min(limit, txgbe_xstats_calc_num(dev));
+
+	/* Extended stats from txgbe_hw_stats */
+	for (i = 0; i < limit; i++) {
+		uint32_t offset;
+
+		if (txgbe_get_offset_by_id(i, &offset)) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			break;
+		}
+		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+	}
+
+	return i;
+}
+
+static int
+txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+		uint64_t *values, unsigned int limit)
+{
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+	unsigned int i;
+
+	if (ids == NULL)
+		return txgbe_dev_xstats_get_(dev, values, limit);
+
+	for (i = 0; i < limit; i++) {
+		uint32_t offset;
+
+		if (txgbe_get_offset_by_id(ids[i], &offset)) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			break;
+		}
+		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+	}
+
+	return i;
+}
+
+static int
+txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+
+	/* HW registers are cleared on read */
+	hw->offset_loaded = 0;
+	txgbe_read_stats_registers(hw, hw_stats);
+	hw->offset_loaded = 1;
+
+	/* Reset software totals */
+	memset(hw_stats, 0, sizeof(*hw_stats));
+
+	return 0;
+}
+
 static int
 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -1996,7 +2369,12 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_reset                  = txgbe_dev_reset,
 	.link_update                = txgbe_dev_link_update,
 	.stats_get                  = txgbe_dev_stats_get,
+	.xstats_get                 = txgbe_dev_xstats_get,
+	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
 	.stats_reset                = txgbe_dev_stats_reset,
+	.xstats_reset               = txgbe_dev_xstats_reset,
+	.xstats_get_names           = txgbe_dev_xstats_get_names,
+	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index f82b400f6..e18ddc1c5 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -184,6 +184,12 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 #define TXGBE_DEFAULT_TX_HTHRESH      0
 #define TXGBE_DEFAULT_TX_WTHRESH      0
 
+/* store statistics names and its offset in stats structure */
+struct rte_txgbe_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	unsigned int offset;
+};
+
 const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
 				      struct rte_ether_addr *mc_addr_set,
-- 
2.18.4





Thread overview: 84+ messages
2020-10-19  8:53 [dpdk-dev] [PATCH v4 00/58] net: txgbe PMD Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 01/58] net/txgbe: add build and doc infrastructure Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 02/58] net/txgbe: add ethdev probe and remove Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 03/58] net/txgbe: add device init and uninit Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 04/58] net/txgbe: add error types and registers Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 05/58] net/txgbe: add MAC type and bus lan id Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 06/58] net/txgbe: add HW infrastructure and dummy function Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 07/58] net/txgbe: add EEPROM functions Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 08/58] net/txgbe: add HW init and reset operation Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 09/58] net/txgbe: add PHY init Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 10/58] net/txgbe: add module identify Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 11/58] net/txgbe: add PHY reset Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 12/58] net/txgbe: add info get operation Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 13/58] net/txgbe: add interrupt operation Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 14/58] net/txgbe: add device configure operation Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 15/58] net/txgbe: add link status change Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 16/58] net/txgbe: add multi-speed link setup Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 17/58] net/txgbe: add autoc read and write Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 18/58] net/txgbe: add MAC address operations Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 19/58] net/txgbe: add unicast hash bitmap Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 20/58] net/txgbe: add Rx and Tx init Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 21/58] net/txgbe: add Rx and Tx queues setup and release Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 22/58] net/txgbe: add Rx and Tx start and stop Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 23/58] net/txgbe: add packet type Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 24/58] net/txgbe: fill simple transmit function Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 25/58] net/txgbe: fill transmit function with hardware offload Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 26/58] net/txgbe: fill Tx prepare function Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 27/58] net/txgbe: fill receive functions Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 28/58] net/txgbe: add device start operation Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 29/58] net/txgbe: add Rx and Tx data path start and stop Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 30/58] net/txgbe: add device stop and close operations Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 31/58] net/txgbe: support Rx interrupt Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 32/58] net/txgbe: add Rx and Tx queue info get Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 33/58] net/txgbe: add device stats get Jiawen Wu
2020-10-19  8:53 ` Jiawen Wu [this message]
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 35/58] net/txgbe: add queue stats mapping Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 36/58] net/txgbe: add VLAN handle support Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 37/58] net/txgbe: add SWFW semaphore and lock Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 38/58] net/txgbe: add PF module init and uninit for SRIOV Jiawen Wu
2020-10-26 14:54   ` Ferruh Yigit
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 39/58] net/txgbe: add process mailbox operation Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 40/58] net/txgbe: add PF module configure for SRIOV Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 41/58] net/txgbe: add VMDq configure Jiawen Wu
2020-10-19  8:53 ` [dpdk-dev] [PATCH v4 42/58] net/txgbe: add RSS support Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 43/58] net/txgbe: add DCB support Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 44/58] net/txgbe: add flow control support Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 45/58] net/txgbe: add FC auto negotiation support Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 46/58] net/txgbe: add priority flow control support Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 47/58] net/txgbe: add device promiscuous and allmulticast mode Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 48/58] net/txgbe: add MTU set operation Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 49/58] net/txgbe: add FW version get operation Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 50/58] net/txgbe: add EEPROM info " Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 51/58] net/txgbe: add register dump support Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 52/58] net/txgbe: support device LED on and off Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 53/58] net/txgbe: add mirror rule operations Jiawen Wu
2020-10-26 14:54   ` Ferruh Yigit
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 54/58] net/txgbe: add PTP support Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 55/58] net/txgbe: add DCB info get operation Jiawen Wu
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 56/58] net/txgbe: add Rx and Tx descriptor status Jiawen Wu
2020-10-26 14:54   ` Ferruh Yigit
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 57/58] net/txgbe: change stop operation callback to return int Jiawen Wu
2020-10-26 14:55   ` Ferruh Yigit
2020-10-19  8:54 ` [dpdk-dev] [PATCH v4 58/58] net/txgbe: introduce log type in the driver documentation Jiawen Wu
2020-10-26 14:55   ` Ferruh Yigit
2020-10-22 11:23 ` [dpdk-dev] [PATCH v4 00/58] net: txgbe PMD Jiawen Wu
2020-10-22 11:44   ` Ferruh Yigit
2020-10-26 14:55 ` Ferruh Yigit
2020-10-27  2:39   ` Jiawen Wu
2020-10-27 11:37     ` Ferruh Yigit
2020-11-03 23:08       ` Thomas Monjalon
2020-11-04 17:24         ` Ferruh Yigit
2020-11-05  1:55           ` Jiawen Wu
2020-11-05  8:55           ` Jiawen Wu
2020-11-05  9:28             ` Thomas Monjalon
2020-11-06  6:28               ` Honnappa Nagarahalli
2020-11-06  9:22                 ` Jiawen Wu
2020-11-06 17:36                   ` Honnappa Nagarahalli
2020-11-06 18:21               ` Honnappa Nagarahalli
2020-11-06 19:00                 ` Thomas Monjalon
2020-11-06 19:56                   ` Honnappa Nagarahalli
2020-11-07  9:55                     ` Thomas Monjalon
2020-10-27  8:48   ` David Marchand
2020-10-27 11:36     ` Ferruh Yigit
2020-10-27 11:39       ` David Marchand
