DPDK patches and discussions
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v2 34/56] net/txgbe: add device xstats get
Date: Mon,  5 Oct 2020 20:08:48 +0800
Message-ID: <20201005120910.189343-35-jiawenwu@trustnetic.com>
In-Reply-To: <20201005120910.189343-1-jiawenwu@trustnetic.com>

From: Jiawen Wu <jiawenwu@trustnetic.com>

Add extended statistics (xstats) get operations, reading the counters
from hardware registers. The counters cover basic Rx/Tx, error,
flow director, FCoE, MACsec, per-priority and per-queue statistics.
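
As a minimal sketch (not part of this patch), this is how an application
might read the counters exposed here through the generic ethdev xstats
API; the port id and helper name are assumptions for illustration only,
and error handling is kept minimal:

    /*
     * Illustration only: dump all xstats of one port.
     * Assumes the port is already configured and started.
     */
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    static void
    dump_xstats(uint16_t port_id)
    {
        struct rte_eth_xstat_name *names;
        struct rte_eth_xstat *xstats;
        int nb, i;

        /* A NULL array asks only for the number of available counters. */
        nb = rte_eth_xstats_get(port_id, NULL, 0);
        if (nb <= 0)
            return;

        names = calloc(nb, sizeof(*names));
        xstats = calloc(nb, sizeof(*xstats));
        if (names != NULL && xstats != NULL &&
            rte_eth_xstats_get_names(port_id, names, nb) == nb &&
            rte_eth_xstats_get(port_id, xstats, nb) == nb) {
            /* xstats[i].id indexes into the names array. */
            for (i = 0; i < nb; i++)
                printf("%s: %" PRIu64 "\n",
                       names[xstats[i].id].name, xstats[i].value);
        }
        free(names);
        free(xstats);
    }

The same counters should also be visible in testpmd with
"show port xstats <port>".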

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/txgbe.ini |   1 +
 drivers/net/txgbe/txgbe_ethdev.c   | 383 +++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_ethdev.h   |   6 +
 3 files changed, 390 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 32df33dfc..e18632205 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -24,6 +24,7 @@ Inner L3 checksum    = P
 Inner L4 checksum    = P
 Packet type parsing  = Y
 Basic stats          = Y
+Extended stats       = Y
 Multiprocess aware   = Y
 Linux UIO            = Y
 Linux VFIO           = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b1d321958..4b16bda69 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -65,6 +65,144 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
 
 static const struct eth_dev_ops txgbe_eth_dev_ops;
 
+#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
+#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
+	/* MNG RxTx */
+	HW_XSTAT(mng_bmc2host_packets),
+	HW_XSTAT(mng_host2bmc_packets),
+	/* Basic RxTx */
+	HW_XSTAT(rx_packets),
+	HW_XSTAT(tx_packets),
+	HW_XSTAT(rx_bytes),
+	HW_XSTAT(tx_bytes),
+	HW_XSTAT(rx_total_bytes),
+	HW_XSTAT(rx_total_packets),
+	HW_XSTAT(tx_total_packets),
+	HW_XSTAT(rx_total_missed_packets),
+	HW_XSTAT(rx_broadcast_packets),
+	HW_XSTAT(rx_multicast_packets),
+	HW_XSTAT(rx_management_packets),
+	HW_XSTAT(tx_management_packets),
+	HW_XSTAT(rx_management_dropped),
+
+	/* Basic Error */
+	HW_XSTAT(rx_crc_errors),
+	HW_XSTAT(rx_illegal_byte_errors),
+	HW_XSTAT(rx_error_bytes),
+	HW_XSTAT(rx_mac_short_packet_dropped),
+	HW_XSTAT(rx_length_errors),
+	HW_XSTAT(rx_undersize_errors),
+	HW_XSTAT(rx_fragment_errors),
+	HW_XSTAT(rx_oversize_errors),
+	HW_XSTAT(rx_jabber_errors),
+	HW_XSTAT(rx_l3_l4_xsum_error),
+	HW_XSTAT(mac_local_errors),
+	HW_XSTAT(mac_remote_errors),
+
+	/* Flow Director */
+	HW_XSTAT(flow_director_added_filters),
+	HW_XSTAT(flow_director_removed_filters),
+	HW_XSTAT(flow_director_filter_add_errors),
+	HW_XSTAT(flow_director_filter_remove_errors),
+	HW_XSTAT(flow_director_matched_filters),
+	HW_XSTAT(flow_director_missed_filters),
+
+	/* FCoE */
+	HW_XSTAT(rx_fcoe_crc_errors),
+	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
+	HW_XSTAT(rx_fcoe_dropped),
+	HW_XSTAT(rx_fcoe_packets),
+	HW_XSTAT(tx_fcoe_packets),
+	HW_XSTAT(rx_fcoe_bytes),
+	HW_XSTAT(tx_fcoe_bytes),
+	HW_XSTAT(rx_fcoe_no_ddp),
+	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
+
+	/* MACSEC */
+	HW_XSTAT(tx_macsec_pkts_untagged),
+	HW_XSTAT(tx_macsec_pkts_encrypted),
+	HW_XSTAT(tx_macsec_pkts_protected),
+	HW_XSTAT(tx_macsec_octets_encrypted),
+	HW_XSTAT(tx_macsec_octets_protected),
+	HW_XSTAT(rx_macsec_pkts_untagged),
+	HW_XSTAT(rx_macsec_pkts_badtag),
+	HW_XSTAT(rx_macsec_pkts_nosci),
+	HW_XSTAT(rx_macsec_pkts_unknownsci),
+	HW_XSTAT(rx_macsec_octets_decrypted),
+	HW_XSTAT(rx_macsec_octets_validated),
+	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
+	HW_XSTAT(rx_macsec_sc_pkts_delayed),
+	HW_XSTAT(rx_macsec_sc_pkts_late),
+	HW_XSTAT(rx_macsec_sa_pkts_ok),
+	HW_XSTAT(rx_macsec_sa_pkts_invalid),
+	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
+	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
+	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
+
+	/* MAC RxTx */
+	HW_XSTAT(rx_size_64_packets),
+	HW_XSTAT(rx_size_65_to_127_packets),
+	HW_XSTAT(rx_size_128_to_255_packets),
+	HW_XSTAT(rx_size_256_to_511_packets),
+	HW_XSTAT(rx_size_512_to_1023_packets),
+	HW_XSTAT(rx_size_1024_to_max_packets),
+	HW_XSTAT(tx_size_64_packets),
+	HW_XSTAT(tx_size_65_to_127_packets),
+	HW_XSTAT(tx_size_128_to_255_packets),
+	HW_XSTAT(tx_size_256_to_511_packets),
+	HW_XSTAT(tx_size_512_to_1023_packets),
+	HW_XSTAT(tx_size_1024_to_max_packets),
+
+	/* Flow Control */
+	HW_XSTAT(tx_xon_packets),
+	HW_XSTAT(rx_xon_packets),
+	HW_XSTAT(tx_xoff_packets),
+	HW_XSTAT(rx_xoff_packets),
+
+	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
+	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
+	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
+	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
+};
+
+#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
+			   sizeof(rte_txgbe_stats_strings[0]))
+
+/* Per-priority statistics */
+#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
+	UP_XSTAT(rx_up_packets),
+	UP_XSTAT(tx_up_packets),
+	UP_XSTAT(rx_up_bytes),
+	UP_XSTAT(tx_up_bytes),
+	UP_XSTAT(rx_up_drop_packets),
+
+	UP_XSTAT(tx_up_xon_packets),
+	UP_XSTAT(rx_up_xon_packets),
+	UP_XSTAT(tx_up_xoff_packets),
+	UP_XSTAT(rx_up_xoff_packets),
+	UP_XSTAT(rx_up_dropped),
+	UP_XSTAT(rx_up_mbuf_alloc_errors),
+	UP_XSTAT(tx_up_xon2off_packets),
+};
+
+#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
+			   sizeof(rte_txgbe_up_strings[0]))
+
+/* Per-queue statistics */
+#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
+	QP_XSTAT(rx_qp_packets),
+	QP_XSTAT(tx_qp_packets),
+	QP_XSTAT(rx_qp_bytes),
+	QP_XSTAT(tx_qp_bytes),
+	QP_XSTAT(rx_qp_mc_packets),
+};
+
+#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
+			   sizeof(rte_txgbe_qp_strings[0]))
+
 static inline int
 txgbe_is_sfp(struct txgbe_hw *hw)
 {
@@ -1204,6 +1342,246 @@ txgbe_dev_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+txgbe_xstats_calc_num(struct rte_eth_dev *dev)
+{
+	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+	return TXGBE_NB_HW_STATS +
+	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
+	       TXGBE_NB_QP_STATS * nb_queues;
+}
+
+static inline int
+txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
+{
+	int nb, st;
+
+	/* Extended stats from txgbe_hw_stats */
+	if (id < TXGBE_NB_HW_STATS) {
+		snprintf(name, size, "[hw]%s",
+			rte_txgbe_stats_strings[id].name);
+		return 0;
+	}
+	id -= TXGBE_NB_HW_STATS;
+
+	/* Priority Stats */
+	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+		nb = id / TXGBE_NB_UP_STATS;
+		st = id % TXGBE_NB_UP_STATS;
+		snprintf(name, size, "[p%u]%s", nb,
+			rte_txgbe_up_strings[st].name);
+		return 0;
+	}
+	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+	/* Queue Stats */
+	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+		nb = id / TXGBE_NB_QP_STATS;
+		st = id % TXGBE_NB_QP_STATS;
+		snprintf(name, size, "[q%u]%s", nb,
+			rte_txgbe_qp_strings[st].name);
+		return 0;
+	}
+	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+	return -(int)(id + 1);
+}
+
+static inline int
+txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
+{
+	int nb, st;
+
+	/* Extended stats from txgbe_hw_stats */
+	if (id < TXGBE_NB_HW_STATS) {
+		*offset = rte_txgbe_stats_strings[id].offset;
+		return 0;
+	}
+	id -= TXGBE_NB_HW_STATS;
+
+	/* Priority Stats */
+	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+		nb = id / TXGBE_NB_UP_STATS;
+		st = id % TXGBE_NB_UP_STATS;
+		*offset = rte_txgbe_up_strings[st].offset +
+			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
+		return 0;
+	}
+	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+	/* Queue Stats */
+	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+		nb = id / TXGBE_NB_QP_STATS;
+		st = id % TXGBE_NB_QP_STATS;
+		*offset = rte_txgbe_qp_strings[st].offset +
+			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
+		return 0;
+	}
+	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+	return -(int)(id + 1);
+}
+
+static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
+{
+	unsigned i, count;
+
+	count = txgbe_xstats_calc_num(dev);
+	if (xstats_names == NULL) {
+		return count;
+	}
+
+	/* Note: limit >= count is checked by the caller,
+	 * rte_eth_xstats_get_names()
+	 */
+	limit = min(limit, count);
+
+	/* Extended stats from txgbe_hw_stats */
+	for (i = 0; i < limit; i++) {
+		if (txgbe_get_name_by_id(i, xstats_names[i].name,
+			sizeof(xstats_names[i].name))) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			break;
+		}
+	}
+
+	return i;
+}
+
+static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names,
+	const uint64_t *ids,
+	unsigned int limit)
+{
+	unsigned i;
+
+	if (ids == NULL) {
+		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
+	}
+
+	for (i = 0; i < limit; i++) {
+		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
+				sizeof(xstats_names[i].name))) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			return -1;
+		}
+	}
+
+	return i;
+}
+
+static int
+txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+					 unsigned limit)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+	unsigned i, count;
+
+	txgbe_read_stats_registers(hw, hw_stats);
+
+	/* When xstats is NULL this call is a reset request; reading the
+	 * registers above has already cleared them.
+	 */
+	count = txgbe_xstats_calc_num(dev);
+	if (xstats == NULL) {
+		return count;
+	}
+
+	limit = min(limit, txgbe_xstats_calc_num(dev));
+
+	/* Extended stats from txgbe_hw_stats */
+	for (i = 0; i < limit; i++) {
+		uint32_t offset = 0;
+
+		if (txgbe_get_offset_by_id(i, &offset)) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			break;
+		}
+		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
+		xstats[i].id = i;
+	}
+
+	return i;
+}
+
+static int
+txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
+					 unsigned limit)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+	unsigned i, count;
+
+	txgbe_read_stats_registers(hw, hw_stats);
+
+	/* When values is NULL this call is a reset request; reading the
+	 * registers above has already cleared them.
+	 */
+	count = txgbe_xstats_calc_num(dev);
+	if (values == NULL) {
+		return count;
+	}
+
+	limit = min(limit, txgbe_xstats_calc_num(dev));
+
+	/* Extended stats from txgbe_hw_stats */
+	for (i = 0; i < limit; i++) {
+		uint32_t offset;
+
+		if (txgbe_get_offset_by_id(i, &offset)) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			break;
+		}
+		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+	}
+
+	return i;
+}
+
+static int
+txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+		uint64_t *values, unsigned int limit)
+{
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+	unsigned i;
+
+	if (ids == NULL) {
+		return txgbe_dev_xstats_get_(dev, values, limit);
+	}
+
+	for (i = 0; i < limit; i++) {
+		uint32_t offset;
+
+		if (txgbe_get_offset_by_id(ids[i], &offset)) {
+			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+			break;
+		}
+		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+	}
+
+	return i;
+}
+
+static int
+txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+
+	/* HW registers are cleared on read */
+	hw->offset_loaded = 0;
+	txgbe_read_stats_registers(hw, hw_stats);
+	hw->offset_loaded = 1;
+
+	/* Reset software totals */
+	memset(hw_stats, 0, sizeof(*hw_stats));
+
+	return 0;
+}
+
 static int
 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -1988,7 +2366,12 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_reset                  = txgbe_dev_reset,
 	.link_update                = txgbe_dev_link_update,
 	.stats_get                  = txgbe_dev_stats_get,
+	.xstats_get                 = txgbe_dev_xstats_get,
+	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
 	.stats_reset                = txgbe_dev_stats_reset,
+	.xstats_reset               = txgbe_dev_xstats_reset,
+	.xstats_get_names           = txgbe_dev_xstats_get_names,
+	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index f82b400f6..6bcab3cfc 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -184,6 +184,12 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
 #define TXGBE_DEFAULT_TX_HTHRESH      0
 #define TXGBE_DEFAULT_TX_WTHRESH      0
 
+/* Store statistics names and their offsets in the stats structure */
+struct rte_txgbe_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	unsigned offset;
+};
+
 const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
 				      struct rte_ether_addr *mc_addr_set,
-- 
2.18.4
