From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v1 20/42] net/txgbe: add RX and TX stop
Date: Tue,  1 Sep 2020 19:50:51 +0800	[thread overview]
Message-ID: <20200901115113.1529675-20-jiawenwu@trustnetic.com> (raw)
In-Reply-To: <20200901115113.1529675-1-jiawenwu@trustnetic.com>

Add stop operations for the receive and transmit units of a specified queue, release the mbufs held by the queues, and free the queues.
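
For reference, a minimal sketch (not part of this patch) of how an
application drives these new ops through the public ethdev API. The
rte_eth_dev_*_queue_stop/start() functions are the real entry points in
rte_ethdev.h; restart_queue() and its error handling are illustrative only:

#include <rte_ethdev.h>

/* Illustrative helper: stop both units of queue 'qid' on 'port', then
 * restart them. The PMD callbacks added below run under these calls,
 * and on success the queue state becomes RTE_ETH_QUEUE_STATE_STOPPED.
 */
static int
restart_queue(uint16_t port, uint16_t qid)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port, qid);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_stop(port, qid);
	if (ret != 0)
		return ret;

	/* The queue may be reconfigured here while it is stopped. */

	ret = rte_eth_dev_rx_queue_start(port, qid);
	if (ret != 0)
		return ret;
	return rte_eth_dev_tx_queue_start(port, qid);
}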

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
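
Both Rx and Tx stop paths below wait for the queue enable bit to clear
using a bounded poll with a timeout. A generic sketch of that idiom
follows; reg_read(), sleep_ms(), and all names here are hypothetical
stand-ins for illustration, not driver or DPDK API:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helpers, for illustration only. */
extern uint32_t reg_read(uint32_t offset);
extern void sleep_ms(unsigned int ms);

/* Poll until 'bit' clears in the register at 'offset', giving up after
 * roughly 'timeout_ms' milliseconds. Returns true once the bit clears.
 */
static bool
poll_bit_clear(uint32_t offset, uint32_t bit, int timeout_ms)
{
	do {
		sleep_ms(1);
		if (!(reg_read(offset) & bit))
			return true;
	} while (--timeout_ms > 0);
	return false;
}
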
 drivers/net/txgbe/base/txgbe_type.h |   3 +
 drivers/net/txgbe/txgbe_ethdev.c    |   7 +
 drivers/net/txgbe/txgbe_ethdev.h    |  15 ++
 drivers/net/txgbe/txgbe_rxtx.c      | 305 +++++++++++++++++++++++++++-
 drivers/net/txgbe/txgbe_rxtx.h      |  25 +++
 5 files changed, 354 insertions(+), 1 deletion(-)

diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 6229d8acc..c05e8e8b1 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -467,6 +467,9 @@ struct txgbe_hw {
 		TXGBE_SW_RESET,
 		TXGBE_GLOBAL_RESET
 	} reset_type;
+
+	u32 q_rx_regs[128 * 4];
+	u32 q_tx_regs[128 * 4];
 };
 
 #include "txgbe_regs.h"
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 4fab88c5c..80470c6e7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -599,6 +599,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 
 error:
 	PMD_INIT_LOG(ERR, "failure in txgbe_dev_start(): %d", err);
+	txgbe_dev_clear_queues(dev);
 	return -EIO;
 }
 
@@ -638,6 +639,8 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
 		hw->mac.disable_tx_laser(hw);
 	}
 
+	txgbe_dev_clear_queues(dev);
+
 	/* Clear stored conf */
 	dev->data->scattered_rx = 0;
 	dev->data->lro = 0;
@@ -1320,7 +1323,11 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.stats_get                  = txgbe_dev_stats_get,
 	.stats_reset                = txgbe_dev_stats_reset,
 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
+	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
 	.tx_queue_start	            = txgbe_dev_tx_queue_start,
+	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
+	.rx_queue_release           = txgbe_dev_rx_queue_release,
+	.tx_queue_release           = txgbe_dev_tx_queue_release,
 	.dev_led_on                 = txgbe_dev_led_on,
 	.dev_led_off                = txgbe_dev_led_off,
 };
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2dc0327cb..f5ee1cae6 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -82,18 +82,33 @@ int txgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
 /*
  * RX/TX function prototypes
  */
+void txgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
 void txgbe_dev_free_queues(struct rte_eth_dev *dev);
 
+void txgbe_dev_rx_queue_release(void *rxq);
+
+void txgbe_dev_tx_queue_release(void *txq);
+
 int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+
 int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 
+int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
 int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index ad5d1d22f..58824045b 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -15,6 +15,8 @@
 
 #include <rte_ethdev.h>
 #include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
 
 #include "txgbe_logs.h"
 #include "base/txgbe.h"
@@ -102,6 +104,22 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
 }
 
+static void __rte_cold
+txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
+{
+	if (txq != NULL && txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->free_swring(txq);
+		rte_free(txq);
+	}
+}
+
+void __rte_cold
+txgbe_dev_tx_queue_release(void *txq)
+{
+	txgbe_tx_queue_release(txq);
+}
+
 /* Takes an ethdev and a queue and sets up the tx function to be used based on
  * the queue parameters. Used in tx_queue_setup by primary process and then
  * in dev_init by secondary process when attaching to an existing ethdev.
@@ -129,10 +147,169 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
 	}
 }
 
+/**
+ * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
+ *
+ * The "next" pointer of the last segment of a not-yet-completed RSC cluster
+ * in the sw_sc_ring is not set to NULL but rather points to the next
+ * mbuf of this RSC aggregation (which has not been completed yet and still
+ * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
+ * just free the first "nb_segs" segments of the cluster explicitly by
+ * calling rte_pktmbuf_free_seg() on each of them.
+ *
+ * @m scattered cluster head
+ */
+static void __rte_cold
+txgbe_free_sc_cluster(struct rte_mbuf *m)
+{
+	uint16_t i, nb_segs = m->nb_segs;
+	struct rte_mbuf *next_seg;
+
+	for (i = 0; i < nb_segs; i++) {
+		next_seg = m->next;
+		rte_pktmbuf_free_seg(m);
+		m = next_seg;
+	}
+}
+
+static void __rte_cold
+txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
+{
+	unsigned i;
+
+	if (rxq->sw_ring != NULL) {
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_ring[i].mbuf != NULL) {
+				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+				rxq->sw_ring[i].mbuf = NULL;
+			}
+		}
+		if (rxq->rx_nb_avail) {
+			for (i = 0; i < rxq->rx_nb_avail; ++i) {
+				struct rte_mbuf *mb;
+
+				mb = rxq->rx_stage[rxq->rx_next_avail + i];
+				rte_pktmbuf_free_seg(mb);
+			}
+			rxq->rx_nb_avail = 0;
+		}
+	}
+
+	if (rxq->sw_sc_ring)
+		for (i = 0; i < rxq->nb_rx_desc; i++)
+			if (rxq->sw_sc_ring[i].fbuf) {
+				txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
+				rxq->sw_sc_ring[i].fbuf = NULL;
+			}
+}
+
+static void __rte_cold
+txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
+{
+	if (rxq != NULL) {
+		txgbe_rx_queue_release_mbufs(rxq);
+		rte_free(rxq->sw_ring);
+		rte_free(rxq->sw_sc_ring);
+		rte_free(rxq);
+	}
+}
+
+void __rte_cold
+txgbe_dev_rx_queue_release(void *rxq)
+{
+	txgbe_rx_queue_release(rxq);
+}
+
+/* Reset dynamic txgbe_rx_queue fields back to defaults */
+static void __rte_cold
+txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
+{
+	static const struct txgbe_rx_desc zeroed_desc = {{{0}, {0} }, {{0}, {0} } };
+	unsigned i;
+	uint16_t len = rxq->nb_rx_desc;
+
+	/*
+	 * By default, the Rx queue setup function allocates enough memory for
+	 * TXGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires
+	 * extra memory at the end of the descriptor ring to be zeroed out.
+	 */
+	if (adapter->rx_bulk_alloc_allowed)
+		/* zero out extra memory */
+		len += RTE_PMD_TXGBE_RX_MAX_BURST;
+
+	/*
+	 * Zero out HW ring memory. Also zero out the extra memory at the end
+	 * of the H/W ring so that the look-ahead logic in the Rx Burst bulk
+	 * alloc function reads the extra memory as zeros.
+	 */
+	for (i = 0; i < len; i++) {
+		rxq->rx_ring[i] = zeroed_desc;
+	}
+
+	/*
+	 * Initialize the extra software ring entries. Space for these extra
+	 * entries is always allocated.
+	 */
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+	for (i = rxq->nb_rx_desc; i < len; ++i) {
+		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
+	}
+
+	rxq->rx_nb_avail = 0;
+	rxq->rx_next_avail = 0;
+	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+
+}
+
+void __rte_cold
+txgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+	unsigned i;
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct txgbe_tx_queue *txq = dev->data->tx_queues[i];
+
+		if (txq != NULL) {
+			txq->ops->release_mbufs(txq);
+			txq->ops->reset(txq);
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+		if (rxq != NULL) {
+			txgbe_rx_queue_release_mbufs(rxq);
+			txgbe_reset_rx_queue(adapter, rxq);
+		}
+	}
+}
+
 void
 txgbe_dev_free_queues(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	unsigned i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+		dev->data->rx_queues[i] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
 }
 
 static int __rte_cold
@@ -490,6 +667,41 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 	return 0;
 }
 
+void
+txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 4];
+	*(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
+}
+
+void
+txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 4];
+	wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
+	wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
+	wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
+}
+
+void
+txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 4];
+	*(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
+}
+
+void
+txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 4];
+	wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
+	wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
+	wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
+}
 
 /*
  * Start Receive Units for specified queue.
@@ -532,6 +744,44 @@ txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return 0;
 }
 
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	struct txgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
+	wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
+
+	/* Wait until the Rx Enable bit clears */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+	rte_delay_us(RTE_TXGBE_WAIT_100_US);
+	txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+	txgbe_rx_queue_release_mbufs(rxq);
+	txgbe_reset_rx_queue(adapter, rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
 /*
  * Start Transmit Units for specified queue.
  */
@@ -565,3 +815,55 @@
 	return 0;
 }
 
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_tx_queue *txq;
+	uint32_t txdctl;
+	uint32_t txtdh, txtdt;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Wait until TX queue is empty */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_us(RTE_TXGBE_WAIT_100_US);
+		txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
+		txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
+	} while (--poll_ms && (txtdh != txtdt));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR,
+			"Tx Queue %d is not empty when stopping.",
+			tx_queue_id);
+
+	txgbe_dev_save_tx_queue(hw, txq->reg_idx);
+	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
+
+	/* Wait until the Tx Enable bit clears */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+			tx_queue_id);
+
+	rte_delay_us(RTE_TXGBE_WAIT_100_US);
+	txgbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+	if (txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->reset(txq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b8ca83672..72cbf1f87 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -54,6 +54,7 @@ struct txgbe_rx_desc {
 #define RTE_PMD_TXGBE_RX_MAX_BURST 32
 
 #define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_TXGBE_WAIT_100_US               100
 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
@@ -62,6 +63,10 @@ struct txgbe_rx_entry {
 	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
 };
 
+struct txgbe_scattered_rx_entry {
+	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
+};
+
 /**
  * Structure associated with each RX queue.
  */
@@ -70,7 +75,16 @@ struct txgbe_rx_queue {
 	volatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */
 	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
 	struct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */
+	struct txgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
+	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+	uint16_t            rx_tail;  /**< current value of RDT register. */
+	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
+	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
+	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
 	uint16_t            queue_id; /**< RX queue index. */
 	uint16_t            reg_idx;  /**< RX queue register index. */
 	uint16_t            port_id;  /**< Device port identifier. */
@@ -78,6 +92,10 @@ struct txgbe_rx_queue {
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint8_t             rx_deferred_start; /**< not in global dev start. */
 	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+	struct rte_mbuf fake_mbuf;
+	/** hold packets to return to application */
+	struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2];
 };
 
 /**
@@ -94,9 +112,16 @@ struct txgbe_tx_queue {
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
 	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	const struct txgbe_txq_ops *ops;       /**< txq ops */
 	uint8_t             tx_deferred_start; /**< not in global dev start. */
 };
 
+struct txgbe_txq_ops {
+	void (*release_mbufs)(struct txgbe_tx_queue *txq);
+	void (*free_swring)(struct txgbe_tx_queue *txq);
+	void (*reset)(struct txgbe_tx_queue *txq);
+};
+
 /* Takes an ethdev and a queue and sets up the tx function to be used based on
  * the queue parameters. Used in tx_queue_setup by primary process and then
  * in dev_init by secondary process when attaching to an existing ethdev.
-- 
2.18.4



