From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v1 18/42] net/txgbe: add rx and tx init
Date: Tue,  1 Sep 2020 19:50:49 +0800
Message-ID: <20200901115113.1529675-18-jiawenwu@trustnetic.com>
In-Reply-To: <20200901115113.1529675-1-jiawenwu@trustnetic.com>

Add receive and transmit initialization.
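
This adds stub Rx/Tx burst functions registered from
eth_txgbe_dev_init(), the logic that selects the Rx and Tx burst
callbacks from the port and queue configuration, and hardware setup of
the Rx and Tx descriptor rings, CRC stripping, frame size, loopback
mode and checksum offloads in txgbe_dev_rx_init() and
txgbe_dev_tx_init(). The burst functions are stubs at this point and
are filled in by later patches in the series.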

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/base/txgbe_type.h |   3 +
 drivers/net/txgbe/txgbe_ethdev.c    |   3 +
 drivers/net/txgbe/txgbe_ethdev.h    |  28 +++
 drivers/net/txgbe/txgbe_rxtx.c      | 330 +++++++++++++++++++++++++++-
 drivers/net/txgbe/txgbe_rxtx.h      |  23 ++
 5 files changed, 381 insertions(+), 6 deletions(-)
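
As a brief illustration (not part of the patch), here is a minimal C
sketch of how the burst callbacks registered in this patch are reached
at runtime. Applications do not call txgbe_recv_pkts() or
txgbe_xmit_pkts() directly; the generic rte_eth_rx_burst() and
rte_eth_tx_burst() API dispatches through the rx_pkt_burst and
tx_pkt_burst pointers that eth_txgbe_dev_init() installs and
txgbe_set_rx_function()/txgbe_set_tx_function() later refine. The port
and queue IDs below are placeholders.

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	#define BURST_SIZE 32

	static void
	forward_one_burst(uint16_t port_id)
	{
		struct rte_mbuf *pkts[BURST_SIZE];
		uint16_t nb_rx, nb_tx, i;

		/* Dispatches to txgbe_recv_pkts() or one of its variants. */
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);

		/* Dispatches to txgbe_xmit_pkts() or txgbe_xmit_pkts_simple(). */
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

		/* Free any packets the Tx ring could not accept. */
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}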

diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 5fd51fece..6229d8acc 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -8,6 +8,9 @@
 #define TXGBE_LINK_UP_TIME	90 /* 9.0 Seconds */
 #define TXGBE_AUTO_NEG_TIME	45 /* 4.5 Seconds */
 
+#define TXGBE_FRAME_SIZE_MAX	(9728) /* Maximum frame size, +FCS */
+#define TXGBE_FRAME_SIZE_DFT	(1518) /* Default frame size, +FCS */
+
 #define TXGBE_ALIGN				128 /* as intel did */
 
 #include "txgbe_status.h"
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 1803ace01..abc457109 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -124,6 +124,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &txgbe_eth_dev_ops;
+	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
+	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
 
 	/*
 	 * For secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index ff2b36f02..6739b580c 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -19,6 +19,12 @@
 #define TXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
 #define TXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
 
+/*
+ * Defines that were not part of txgbe_type.h as they are not used by the
+ * FreeBSD driver.
+ */
+#define TXGBE_VLAN_TAG_SIZE 4
+
 #define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT	500 /* 500us */
 
 #define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
@@ -46,6 +52,7 @@ struct txgbe_adapter {
 	struct txgbe_hw_stats       stats;
 	struct txgbe_interrupt      intr;
 	struct txgbe_vf_info        *vfdata;
+	bool rx_bulk_alloc_allowed;
 };
 
 struct txgbe_vf_representor {
@@ -82,6 +89,27 @@ int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);
+
+uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+				    uint16_t nb_pkts);
+
+uint16_t txgbe_recv_pkts_lro_single_alloc(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
 
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index cb067d4f4..d3782f44d 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -20,6 +20,87 @@
 #include "txgbe_ethdev.h"
 #include "txgbe_rxtx.h"
 
+uint16_t
+txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+		       uint16_t nb_pkts)
+{
+	RTE_SET_USED(tx_queue);
+	RTE_SET_USED(tx_pkts);
+	RTE_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+uint16_t
+txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	RTE_SET_USED(tx_queue);
+	RTE_SET_USED(tx_pkts);
+	RTE_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+uint16_t
+txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	RTE_SET_USED(tx_queue);
+	RTE_SET_USED(tx_pkts);
+	RTE_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_TXGBE_RX_MAX_BURST */
+uint16_t
+txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+			   uint16_t nb_pkts)
+{
+	RTE_SET_USED(rx_queue);
+	RTE_SET_USED(rx_pkts);
+	RTE_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+uint16_t
+txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	RTE_SET_USED(rx_queue);
+	RTE_SET_USED(rx_pkts);
+	RTE_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+static inline uint16_t
+txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+		    bool bulk_alloc)
+{
+	RTE_SET_USED(rx_queue);
+	RTE_SET_USED(rx_pkts);
+	RTE_SET_USED(nb_pkts);
+	RTE_SET_USED(bulk_alloc);
+
+	return 0;
+}
+
+uint16_t
+txgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+				 uint16_t nb_pkts)
+{
+	return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
+{
+	return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
+}
+
 /* Takes an ethdev and a queue and sets up the tx function to be used based on
  * the queue parameters. Used in tx_queue_setup by primary process and then
  * in dev_init by secondary process when attaching to an existing ethdev.
@@ -27,11 +108,26 @@
 void __rte_cold
 txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(txq);
+	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
+	if ((txq->offloads == 0) &&
+			(txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)) {
+		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+		dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
+		dev->tx_pkt_prepare = NULL;
+	} else {
+		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+		PMD_INIT_LOG(DEBUG,
+				" - offloads = 0x%" PRIx64,
+				txq->offloads);
+		PMD_INIT_LOG(DEBUG,
+				" - tx_free_thresh = %lu " "[RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
+				(unsigned long)txq->tx_free_thresh,
+				(unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
+		dev->tx_pkt_burst = txgbe_xmit_pkts;
+		dev->tx_pkt_prepare = txgbe_prep_pkts;
+	}
 }
 
-
 void
 txgbe_dev_free_queues(struct rte_eth_dev *dev)
 {
@@ -41,7 +137,66 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
 void __rte_cold
 txgbe_set_rx_function(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+	/*
+	 * Initialize the appropriate LRO callback.
+	 *
+	 * If all queues satisfy the bulk allocation preconditions
+	 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use
+	 * bulk allocation. Otherwise use a single allocation version.
+	 */
+	if (dev->data->lro) {
+		if (adapter->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
+					   "allocation version");
+			dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
+		} else {
+			PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
+					   "allocation version");
+			dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
+		}
+	} else if (dev->data->scattered_rx) {
+		/*
+		 * Set the non-LRO scattered callback: there are bulk and
+		 * single allocation versions.
+		 */
+		if (adapter->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+					   "allocation callback (port=%d).",
+				     dev->data->port_id);
+			dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
+					    "single allocation) "
+					    "Scattered Rx callback "
+					    "(port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
+		}
+	/*
+	 * Below we set "simple" callbacks according to port/queues parameters.
+	 * If parameters allow we are going to choose between the following
+	 * callbacks:
+	 *    - Bulk Allocation
+	 *    - Single buffer allocation (the simplest one)
+	 */
+	} else if (adapter->rx_bulk_alloc_allowed) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+				    "satisfied. Rx Burst Bulk Alloc function "
+				    "will be used on port=%d.",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = txgbe_recv_pkts_bulk_alloc;
+	} else {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+				    "satisfied, or Scattered Rx is requested "
+				    "(port=%d).",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = txgbe_recv_pkts;
+	}
 }
 
 /*
@@ -50,7 +205,148 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
 int __rte_cold
 txgbe_dev_rx_init(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct txgbe_hw *hw;
+	struct txgbe_rx_queue *rxq;
+	uint64_t bus_addr;
+	uint32_t fctrl;
+	uint32_t hlreg0;
+	uint32_t srrctl;
+	uint32_t rdrxctl;
+	uint32_t rxcsum;
+	uint16_t buf_size;
+	uint16_t i;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+
+	/*
+	 * Make sure receives are disabled while setting
+	 * up the RX context (registers, descriptor rings, etc.).
+	 */
+	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
+	wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
+
+	/* Enable receipt of broadcast frames */
+	fctrl = rd32(hw, TXGBE_PSRCTL);
+	fctrl |= TXGBE_PSRCTL_BCA;
+	wr32(hw, TXGBE_PSRCTL, fctrl);
+
+	/*
+	 * Configure CRC stripping, if any.
+	 */
+	hlreg0 = rd32(hw, TXGBE_SECRXCTL);
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+		hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
+	else
+		hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
+	wr32(hw, TXGBE_SECRXCTL, hlreg0);
+
+	/*
+	 * Configure jumbo frame support, if any.
+	 */
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+			TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
+	} else {
+		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+			TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
+	}
+
+	/*
+	 * If loopback mode is configured, set LPBK bit.
+	 */
+	hlreg0 = rd32(hw, TXGBE_PSRCTL);
+	if (hw->mac.type == txgbe_mac_raptor &&
+	    dev->data->dev_conf.lpbk_mode)
+		hlreg0 |= TXGBE_PSRCTL_LBENA;
+	else
+		hlreg0 &= ~TXGBE_PSRCTL_LBENA;
+
+	wr32(hw, TXGBE_PSRCTL, hlreg0);
+
+	/*
+	 * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+	 */
+	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	/* Setup RX queues */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+
+		/*
+		 * Reset crc_len in case it was changed after queue setup by a
+		 * call to configure.
+		 */
+		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+			rxq->crc_len = RTE_ETHER_CRC_LEN;
+		else
+			rxq->crc_len = 0;
+
+		/* Setup the Base and Length of the Rx Descriptor Rings */
+		bus_addr = rxq->rx_ring_phys_addr;
+		wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
+				(uint32_t)(bus_addr & BIT_MASK32));
+		wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
+				(uint32_t)(bus_addr >> 32));
+		wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+		wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);
+
+		srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
+
+		/* Set if packets are dropped when no descriptors available */
+		if (rxq->drop_en)
+			srrctl |= TXGBE_RXCFG_DROP;
+
+		/*
+		 * Configure the RX buffer size in the PKTLEN field of
+		 * the RXCFG register of the queue.
+		 * The value is in 1 KB resolution. Valid values can be from
+		 * 1 KB to 16 KB.
+		 */
+		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM);
+		buf_size = ROUND_UP(buf_size, 0x1 << 10);
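+		/*
+		 * Worked example (illustration, not part of the original
+		 * patch): a pool created with the default 2176-byte data
+		 * room (RTE_MBUF_DEFAULT_BUF_SIZE) and 128-byte headroom
+		 * gives buf_size = 2048, already a 1 KB multiple, so the
+		 * PKTLEN field below is programmed with 2048.
+		 */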
+		srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
+
+		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
+
+		/* Account for double VLAN tags when checking the buffer size */
+		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+					    2 * TXGBE_VLAN_TAG_SIZE > buf_size)
+			dev->data->scattered_rx = 1;
+		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	}
+
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+		dev->data->scattered_rx = 1;
+
+	/*
+	 * Setup the Checksum Register.
+	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+	 * Enable IP/L4 checksum computation by hardware if requested to do so.
+	 */
+	rxcsum = rd32(hw, TXGBE_PSRCTL);
+	rxcsum |= TXGBE_PSRCTL_PCSD;
+	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		rxcsum |= TXGBE_PSRCTL_L4CSUM;
+	else
+		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
+
+	wr32(hw, TXGBE_PSRCTL, rxcsum);
+
+	if (hw->mac.type == txgbe_mac_raptor) {
+		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
+		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
+		else
+			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
+		wr32(hw, TXGBE_SECRXCTL, rdrxctl);
+	}
+
+	txgbe_set_rx_function(dev);
 
 	return 0;
 }
@@ -61,7 +357,29 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 void __rte_cold
 txgbe_dev_tx_init(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct txgbe_hw     *hw;
+	struct txgbe_tx_queue *txq;
+	uint64_t bus_addr;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+
+	/* Setup the Base and Length of the Tx Descriptor Rings */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		bus_addr = txq->tx_ring_phys_addr;
+		wr32(hw, TXGBE_TXBAL(txq->reg_idx),
+				(uint32_t)(bus_addr & BIT_MASK32));
+		wr32(hw, TXGBE_TXBAH(txq->reg_idx),
+				(uint32_t)(bus_addr >> 32));
+		wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
+			TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
+		/* Setup the HW Tx Head and TX Tail descriptor pointers */
+		wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
+		wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
+	}
 }
 
 /*
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index c5e2e56d3..2d337c46a 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -5,11 +5,34 @@
 #ifndef _TXGBE_RXTX_H_
 #define _TXGBE_RXTX_H_
 
+
+#define RTE_PMD_TXGBE_TX_MAX_BURST 32
+#define RTE_PMD_TXGBE_RX_MAX_BURST 32
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct txgbe_rx_queue {
+	struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
+	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
+	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+	uint16_t            reg_idx;  /**< RX queue register index. */
+	uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
+	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
+	uint64_t	    offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+};
+
 /**
  * Structure associated with each TX queue.
  */
 struct txgbe_tx_queue {
 	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
+	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
+	/**< Start freeing TX buffers if there are less free descriptors than
+	     this value. */
+	uint16_t            tx_free_thresh;
+	uint16_t            reg_idx;       /**< TX queue register index. */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
 };
 
 /* Takes an ethdev and a queue and sets up the tx function to be used based on
-- 
2.18.4



