From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: jiawenwu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v2 22/56] net/txgbe: add RX and TX start and stop
Date: Mon,  5 Oct 2020 20:08:36 +0800
Message-ID: <20201005120910.189343-23-jiawenwu@trustnetic.com>
In-Reply-To: <20201005120910.189343-1-jiawenwu@trustnetic.com>

From: Jiawen Wu <jiawenwu@trustnetic.com>

Add support for starting and stopping the receive and transmit units of a specified queue, and hook the queue start/stop operations into the ethdev ops.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
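A usage note for reviewers (illustrative, not part of the commit): the new
rx/tx queue start/stop dev_ops are reached through the generic ethdev
queue-control API. Below is a minimal sketch, assuming an already
configured and started txgbe port; port_id, queue_id and the helper name
restart_rx_queue are placeholders.

	#include <rte_ethdev.h>

	/* Stop and restart one RX queue at runtime.
	 * rte_eth_dev_rx_queue_stop() ends up in txgbe_dev_rx_queue_stop(),
	 * which clears RXCFG_ENA, saves the queue registers and releases
	 * the ring mbufs; rte_eth_dev_rx_queue_start() re-populates the
	 * ring with fresh mbufs and sets RXCFG_ENA again.
	 */
	static int
	restart_rx_queue(uint16_t port_id, uint16_t queue_id)
	{
		int ret;

		ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
		if (ret != 0)
			return ret;

		return rte_eth_dev_rx_queue_start(port_id, queue_id);
	}

The TX side is symmetric via rte_eth_dev_tx_queue_stop() and
rte_eth_dev_tx_queue_start().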
 doc/guides/nics/features/txgbe.ini  |   1 +
 drivers/net/txgbe/base/txgbe_type.h |   3 +
 drivers/net/txgbe/txgbe_ethdev.c    |   6 +
 drivers/net/txgbe/txgbe_ethdev.h    |  15 ++
 drivers/net/txgbe/txgbe_rxtx.c      | 256 ++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h      |  12 ++
 6 files changed, 293 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 707f64131..e76e9af46 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Link status event    = Y
+Queue start/stop     = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
 Unicast MAC filter   = Y
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 71a62d172..5ea693ed2 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -473,6 +473,9 @@ struct txgbe_hw {
 		TXGBE_SW_RESET,
 		TXGBE_GLOBAL_RESET
 	} reset_type;
+
+	u32 q_rx_regs[128 * 4];
+	u32 q_tx_regs[128 * 4];
 };
 
 #include "txgbe_regs.h"
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index a99018514..34e2a05a3 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -567,6 +567,8 @@ txgbe_dev_close(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	txgbe_dev_free_queues(dev);
+
 	dev->dev_ops = NULL;
 
 	/* disable uio intr before callback unregister */
@@ -1320,6 +1322,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_infos_get              = txgbe_dev_info_get,
 	.dev_set_link_up            = txgbe_dev_set_link_up,
 	.dev_set_link_down          = txgbe_dev_set_link_down,
+	.rx_queue_start	            = txgbe_dev_rx_queue_start,
+	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
+	.tx_queue_start	            = txgbe_dev_tx_queue_start,
+	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
 	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
 	.rx_queue_release           = txgbe_dev_rx_queue_release,
 	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 6636b6e9a..1a29281a8 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -80,6 +80,8 @@ struct txgbe_adapter {
 /*
  * RX/TX function prototypes
  */
+void txgbe_dev_free_queues(struct rte_eth_dev *dev);
+
 void txgbe_dev_rx_queue_release(void *rxq);
 
 void txgbe_dev_tx_queue_release(void *txq);
@@ -97,6 +99,19 @@ int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
 
+void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+
+int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
 
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 492b5dacf..70c1ce49a 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -10,6 +10,9 @@
 #include <errno.h>
 
 #include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
 #include <rte_ethdev.h>
 #include <rte_ethdev_driver.h>
 #include <rte_memzone.h>
@@ -622,12 +625,64 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
+void
+txgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+	unsigned int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+		dev->data->rx_queues[i] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+}
+
 void __rte_cold
 txgbe_set_rx_function(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 }
 
+static int __rte_cold
+txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
+{
+	struct txgbe_rx_entry *rxe = rxq->sw_ring;
+	uint64_t dma_addr;
+	unsigned int i;
+
+	/* Initialize software ring entries */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		volatile struct txgbe_rx_desc *rxd;
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+		if (mbuf == NULL) {
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+				     (unsigned int)rxq->queue_id);
+			return -ENOMEM;
+		}
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		rxd = &rxq->rx_ring[i];
+		TXGBE_RXD_HDRADDR(rxd, 0);
+		TXGBE_RXD_PKTADDR(rxd, dma_addr);
+		rxe[i].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
 /**
  * txgbe_get_rscctl_maxdesc
  *
@@ -958,3 +1013,204 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev)
 	}
 }
 
+void
+txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 4];
+	*(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
+}
+
+void
+txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 4];
+	wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
+	wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
+	wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
+}
+
+void
+txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 4];
+	*(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
+}
+
+void
+txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 4];
+	wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
+	wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
+	wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	/* Allocate buffers for descriptor rings */
+	if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+			     rx_queue_id);
+		return -1;
+	}
+	rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	rxdctl |= TXGBE_RXCFG_ENA;
+	wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+	/* Wait until RX Enable ready */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+	rte_wmb();
+	wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+	wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	struct txgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
+	wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
+
+	/* Wait until RX Enable bit clear */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+	rte_delay_us(RTE_TXGBE_WAIT_100_US);
+	txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+	txgbe_rx_queue_release_mbufs(rxq);
+	txgbe_reset_rx_queue(adapter, rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_tx_queue *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+	/* Wait until TX Enable ready */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+			     tx_queue_id);
+
+	rte_wmb();
+	wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_tx_queue *txq;
+	uint32_t txdctl;
+	uint32_t txtdh, txtdt;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Wait until TX queue is empty */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_us(RTE_TXGBE_WAIT_100_US);
+		txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
+		txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
+	} while (--poll_ms && (txtdh != txtdt));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR,
+			"Tx Queue %d is not empty when stopping.",
+			tx_queue_id);
+
+	txgbe_dev_save_tx_queue(hw, txq->reg_idx);
+	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
+
+	/* Wait until TX Enable bit clear */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+			tx_queue_id);
+
+	rte_delay_us(RTE_TXGBE_WAIT_100_US);
+	txgbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+	if (txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->reset(txq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 0132a59ca..1a02f8fde 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -42,6 +42,14 @@ struct txgbe_rx_desc {
 	} qw1; /* also as r.hdr_addr */
 };
 
+/* @txgbe_rx_desc.qw0 */
+#define TXGBE_RXD_PKTADDR(rxd, v)  \
+	(((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
+
+/* @txgbe_rx_desc.qw1 */
+#define TXGBE_RXD_HDRADDR(rxd, v)  \
+	(((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
+
 /**
  * Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)
  **/
@@ -59,6 +67,9 @@ struct txgbe_tx_desc {
 
 #define TXGBE_PTID_MASK                 0xFF
 
+#define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_TXGBE_WAIT_100_US               100
+
 #define TXGBE_TX_MAX_SEG                    40
 
 /**
@@ -136,6 +147,7 @@ struct txgbe_tx_queue {
 	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
 	volatile uint32_t   *tdc_reg_addr; /**< Address of TDC register. */
 	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
+	uint16_t            tx_tail;       /**< current value of TDT reg. */
 	/**< Start freeing TX buffers if there are less free descriptors than
 	     this value. */
 	uint16_t            tx_free_thresh;
-- 
2.18.4

Thread overview: 63+ messages
2020-10-05 12:08 [dpdk-dev] [PATCH v2 00/56] net: txgbe PMD Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 01/56] net/txgbe: add build and doc infrastructure Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 02/56] net/txgbe: add ethdev probe and remove Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 03/56] net/txgbe: add device init and uninit Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 04/56] net/txgbe: add error types and registers Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 05/56] net/txgbe: add mac type and bus lan id Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 06/56] net/txgbe: add HW infrastructure and dummy function Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 07/56] net/txgbe: add EEPROM functions Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 08/56] net/txgbe: add HW init and reset operation Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 09/56] net/txgbe: add PHY init Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 10/56] net/txgbe: add module identify Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 11/56] net/txgbe: add PHY reset Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 12/56] net/txgbe: add info get operation Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 13/56] net/txgbe: add interrupt operation Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 14/56] net/txgbe: add device configure operation Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 15/56] net/txgbe: add link status change Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 16/56] net/txgbe: add multi-speed link setup Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 17/56] net/txgbe: add autoc read and write Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 18/56] net/txgbe: add MAC address operations Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 19/56] net/txgbe: add unicast hash bitmap Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 20/56] net/txgbe: add RX and TX init Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 21/56] net/txgbe: add RX and TX queues setup and release Jiawen Wu
2020-10-05 12:08 ` Jiawen Wu [this message]
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 23/56] net/txgbe: add packet type Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 24/56] net/txgbe: fill simple transmit function Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 25/56] net/txgbe: fill transmit function with hardware offload Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 26/56] net/txgbe: fill TX prepare funtion Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 27/56] net/txgbe: fill receive functions Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 28/56] net/txgbe: add device start operation Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 29/56] net/txgbe: add RX and TX data path start and stop Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 30/56] net/txgbe: add device stop and close operations Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 31/56] net/txgbe: support RX interrupt Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 32/56] net/txgbe: add RX and TX queue info get Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 33/56] net/txgbe: add device stats get Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 34/56] net/txgbe: add device xstats get Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 35/56] net/txgbe: add queue stats mapping Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 36/56] net/txgbe: add VLAN handle support Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 37/56] net/txgbe: add SWFW semaphore and lock Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 38/56] net/txgbe: add PF module init and uninit for SRIOV Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 39/56] net/txgbe: add process mailbox operation Jiawen Wu
2020-10-06 11:05   ` Ferruh Yigit
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 40/56] net/txgbe: add PF module configure for SRIOV Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 41/56] net/txgbe: add VMDq configure Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 42/56] net/txgbe: add RSS support Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 43/56] net/txgbe: add DCB support Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 44/56] net/txgbe: add flow control support Jiawen Wu
2020-10-05 12:08 ` [dpdk-dev] [PATCH v2 45/56] net/txgbe: add FC auto negotiation support Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 46/56] net/txgbe: add priority flow control support Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 47/56] net/txgbe: add device promiscuous and allmulticast mode Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 48/56] net/txgbe: add MTU set operation Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 49/56] net/txgbe: add FW version get operation Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 50/56] net/txgbe: add EEPROM info " Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 51/56] net/txgbe: add register dump support Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 52/56] net/txgbe: support device LED on and off Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 53/56] net/txgbe: add mirror rule operations Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 54/56] net/txgbe: add PTP support Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 55/56] net/txgbe: add DCB info get operation Jiawen Wu
2020-10-05 12:09 ` [dpdk-dev] [PATCH v2 56/56] net/txgbe: add Rx and Tx descriptor status Jiawen Wu
2020-10-06 11:02 ` [dpdk-dev] [PATCH v2 00/56] net: txgbe PMD Ferruh Yigit
2020-10-09  3:03   ` jiawenwu
2020-10-09  9:47     ` Ferruh Yigit
2020-10-10  9:45       ` Jiawen Wu
2020-10-12  8:37         ` Ferruh Yigit
