From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v4 20/58] net/txgbe: add Rx and Tx init
Date: Mon, 19 Oct 2020 16:53:37 +0800
Message-ID: <20201019085415.82207-21-jiawenwu@trustnetic.com>
In-Reply-To: <20201019085415.82207-1-jiawenwu@trustnetic.com>
Add receive and transmit unit initialization: program the Rx/Tx descriptor ring base addresses and lengths, and configure CRC stripping, jumbo frames, loopback, checksum offloads and RSC (LRO).
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
doc/guides/nics/features/txgbe.ini | 4 +
doc/guides/nics/txgbe.rst | 1 +
drivers/net/txgbe/base/txgbe_type.h | 2 +
drivers/net/txgbe/txgbe_ethdev.h | 8 +
drivers/net/txgbe/txgbe_rxtx.c | 338 ++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_rxtx.h | 32 +++
6 files changed, 385 insertions(+)
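Usage note: the Rx/Tx init routines added here are expected to be driven
from the device start path, which is added later in this series (patch
28/58). The following is only an illustrative sketch under that
assumption and is not part of this patch:

	static int
	example_dev_start(struct rte_eth_dev *dev)
	{
		int err;

		/* Program the Rx unit: descriptor ring base/length, CRC
		 * stripping, jumbo frame size, per-queue buffer size,
		 * checksum offloads and RSC (LRO).
		 */
		err = txgbe_dev_rx_init(dev);
		if (err != 0) {
			PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
			return err;
		}

		/* Program the Tx unit: descriptor ring base/length and
		 * head/tail descriptor pointers.
		 */
		txgbe_dev_tx_init(dev);

		return 0;
	}

txgbe_dev_rx_init() returns a negative errno (e.g. -EINVAL from the RSC
sanity checks in txgbe_set_rsc()), so a caller is expected to propagate
the error rather than continue with a partially configured Rx unit.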
diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 115a8699b..707f64131 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -7,8 +7,12 @@
Speed capabilities = Y
Link status = Y
Link status event = Y
+Jumbo frame = Y
+Scattered Rx = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
+CRC offload = P
+VLAN offload = P
Linux UIO = Y
Linux VFIO = Y
ARMv8 = Y
diff --git a/doc/guides/nics/txgbe.rst b/doc/guides/nics/txgbe.rst
index 0ec4148e2..9ae359c9b 100644
--- a/doc/guides/nics/txgbe.rst
+++ b/doc/guides/nics/txgbe.rst
@@ -12,6 +12,7 @@ Features
- Multiple queues for TX and RX
- MAC filtering
+- Jumbo frames
- Link state information
Prerequisites
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 8a8ca963f..747ada0f9 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -8,6 +8,8 @@
#define TXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define TXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define TXGBE_FRAME_SIZE_MAX (9728) /* Maximum frame size, +FCS */
+#define TXGBE_FRAME_SIZE_DFT (1518) /* Default frame size, +FCS */
#define TXGBE_MAX_UTA 128
#define TXGBE_ALIGN 128 /* as intel did */
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 8fd7a068e..096b17673 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -18,6 +18,7 @@
* Defines that were not part of txgbe_type.h as they are not used by the
* FreeBSD driver.
*/
+#define TXGBE_VLAN_TAG_SIZE 4
#define TXGBE_HKEY_MAX_INDEX 10
/*Default value of Max Rx Queue*/
#define TXGBE_MAX_RX_QUEUE_NUM 128
@@ -76,6 +77,13 @@ struct txgbe_adapter {
#define TXGBE_DEV_UTA_INFO(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->uta_info)
+/*
+ * RX/TX function prototypes
+ */
+int txgbe_dev_rx_init(struct rte_eth_dev *dev);
+
+void txgbe_dev_tx_init(struct rte_eth_dev *dev);
+
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 8a7282328..eadc06bcf 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -10,6 +10,8 @@
#include <rte_common.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_mbuf.h>
#include "txgbe_logs.h"
#include "base/txgbe.h"
@@ -110,3 +112,339 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
return tx_offload_capa;
}
+void __rte_cold
+txgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+}
+
+/**
+ * txgbe_get_rscctl_maxdesc
+ *
+ * @pool Memory pool of the Rx queue
+ */
+static inline uint32_t
+txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
+{
+ struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
+
+ uint16_t maxdesc =
+ RTE_IPV4_MAX_PKT_LEN /
+ (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
+
+ if (maxdesc >= 16)
+ return TXGBE_RXCFG_RSCMAX_16;
+ else if (maxdesc >= 8)
+ return TXGBE_RXCFG_RSCMAX_8;
+ else if (maxdesc >= 4)
+ return TXGBE_RXCFG_RSCMAX_4;
+ else
+ return TXGBE_RXCFG_RSCMAX_1;
+}
+
+/**
+ * txgbe_set_rsc - configure RSC related port HW registers
+ *
+ * Configures the port's RSC related registers.
+ *
+ * @dev port handle
+ *
+ * Returns 0 in case of success or a non-zero error code
+ */
+static int
+txgbe_set_rsc(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_dev_info dev_info = { 0 };
+ bool rsc_capable = false;
+ uint16_t i;
+ uint32_t rdrxctl;
+ uint32_t rfctl;
+
+ /* Sanity check */
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+ rsc_capable = true;
+
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
+ "support it");
+ return -EINVAL;
+ }
+
+ /* RSC global configuration */
+
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
+ "is disabled");
+ return -EINVAL;
+ }
+
+ rfctl = rd32(hw, TXGBE_PSRCTL);
+ if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ rfctl &= ~TXGBE_PSRCTL_RSCDIA;
+ else
+ rfctl |= TXGBE_PSRCTL_RSCDIA;
+ wr32(hw, TXGBE_PSRCTL, rfctl);
+
+ /* If LRO hasn't been requested - we are done here. */
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ return 0;
+
+ /* Set PSRCTL.RSCACK bit */
+ rdrxctl = rd32(hw, TXGBE_PSRCTL);
+ rdrxctl |= TXGBE_PSRCTL_RSCACK;
+ wr32(hw, TXGBE_PSRCTL, rdrxctl);
+
+ /* Per-queue RSC configuration */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ uint32_t srrctl =
+ rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ uint32_t psrtype =
+ rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
+ uint32_t eitr =
+ rd32(hw, TXGBE_ITR(rxq->reg_idx));
+
+ /*
+ * txgbe PMD doesn't support header-split at the moment.
+ */
+ srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
+ srrctl |= TXGBE_RXCFG_HDRLEN(128);
+
+ /*
+ * TODO: Consider setting the Receive Descriptor Minimum
+ * Threshold Size for an RSC case. This is not an obviously
+ * beneficial option but one worth considering...
+ */
+
+ srrctl |= TXGBE_RXCFG_RSCENA;
+ srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
+ srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
+ psrtype |= TXGBE_POOLRSS_L4HDR;
+
+ /*
+ * RSC: Set ITR interval corresponding to 2K ints/s.
+ *
+ * Full-sized RSC aggregations for a 10Gb/s link will
+ * arrive at a rate of about 20K aggregations/s.
+ *
+ * A 2K ints/s rate will cause only 10% of the
+ * aggregations to be closed due to interrupt timer
+ * expiration when streaming at wire speed.
+ *
+ * For a sparse streaming case this setting will yield
+ * at most 500us latency for a single RSC aggregation.
+ */
+ eitr &= ~TXGBE_ITR_IVAL_MASK;
+ eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
+ eitr |= TXGBE_ITR_WRDSA;
+
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
+ wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
+ wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);
+
+ /*
+ * RSC requires the mapping of the queue to the
+ * interrupt vector.
+ */
+ txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
+ }
+
+ dev->data->lro = 1;
+
+ PMD_INIT_LOG(DEBUG, "enabling LRO mode");
+
+ return 0;
+}
+
+/*
+ * Initializes Receive Unit.
+ */
+int __rte_cold
+txgbe_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_rx_queue *rxq;
+ uint64_t bus_addr;
+ uint32_t fctrl;
+ uint32_t hlreg0;
+ uint32_t srrctl;
+ uint32_t rdrxctl;
+ uint32_t rxcsum;
+ uint16_t buf_size;
+ uint16_t i;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the RX context (registers, descriptor rings, etc.).
+ */
+ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
+ wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
+
+ /* Enable receipt of broadcasted frames */
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl |= TXGBE_PSRCTL_BCA;
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ /*
+ * Configure CRC stripping, if any.
+ */
+ hlreg0 = rd32(hw, TXGBE_SECRXCTL);
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
+ else
+ hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, TXGBE_SECRXCTL, hlreg0);
+
+ /*
+ * Configure jumbo frame support, if any.
+ */
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
+ } else {
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
+ }
+
+ /*
+ * If loopback mode is configured, set LPBK bit.
+ */
+ hlreg0 = rd32(hw, TXGBE_PSRCTL);
+ if (hw->mac.type == txgbe_mac_raptor &&
+ dev->data->dev_conf.lpbk_mode)
+ hlreg0 |= TXGBE_PSRCTL_LBENA;
+ else
+ hlreg0 &= ~TXGBE_PSRCTL_LBENA;
+
+ wr32(hw, TXGBE_PSRCTL, hlreg0);
+
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ /*
+ * Reset crc_len in case it was changed after queue setup by a
+ * call to configure.
+ */
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ bus_addr = rxq->rx_ring_phys_addr;
+ wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
+ (uint32_t)(bus_addr & BIT_MASK32));
+ wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+ wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);
+
+ srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= TXGBE_RXCFG_DROP;
+
+ /*
+ * Configure the RX buffer size in the PKTLEN field of
+ * the RXCFG register of the queue.
+ * The value is in 1 KB resolution. Valid values can be from
+ * 1 KB to 16 KB.
+ */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
+ buf_size = ROUND_UP(buf_size, 0x1 << 10);
+ srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
+
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
+
+ /* Account for double VLAN tags when checking the need for scattered Rx */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * TXGBE_VLAN_TAG_SIZE > buf_size)
+ dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ dev->data->scattered_rx = 1;
+
+ /*
+ * Setup the Checksum Register.
+ * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+ * Enable IP/L4 checksum computation by hardware if requested to do so.
+ */
+ rxcsum = rd32(hw, TXGBE_PSRCTL);
+ rxcsum |= TXGBE_PSRCTL_PCSD;
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxcsum |= TXGBE_PSRCTL_L4CSUM;
+ else
+ rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
+
+ wr32(hw, TXGBE_PSRCTL, rxcsum);
+
+ if (hw->mac.type == txgbe_mac_raptor) {
+ rdrxctl = rd32(hw, TXGBE_SECRXCTL);
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
+ else
+ rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, TXGBE_SECRXCTL, rdrxctl);
+ }
+
+ rc = txgbe_set_rsc(dev);
+ if (rc)
+ return rc;
+
+ txgbe_set_rx_function(dev);
+
+ return 0;
+}
+
+/*
+ * Initializes Transmit Unit.
+ */
+void __rte_cold
+txgbe_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_tx_queue *txq;
+ uint64_t bus_addr;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ bus_addr = txq->tx_ring_phys_addr;
+ wr32(hw, TXGBE_TXBAL(txq->reg_idx),
+ (uint32_t)(bus_addr & BIT_MASK32));
+ wr32(hw, TXGBE_TXBAH(txq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
+ TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
+ /* Setup the HW Tx Head and TX Tail descriptor pointers */
+ wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
+ wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
+ }
+}
+
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 9488c2b75..7d3d9c275 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -5,8 +5,40 @@
#ifndef _TXGBE_RXTX_H_
#define _TXGBE_RXTX_H_
+#define RTE_PMD_TXGBE_TX_MAX_BURST 32
+#define RTE_PMD_TXGBE_RX_MAX_BURST 32
+
#define TXGBE_TX_MAX_SEG 40
+/**
+ * Structure associated with each RX queue.
+ */
+struct txgbe_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t reg_idx; /**< RX queue register index. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct txgbe_tx_queue {
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ /**< Start freeing TX buffers if there are fewer free descriptors than
+ * this value.
+ */
+ uint16_t tx_free_thresh;
+ uint16_t reg_idx; /**< TX queue register index. */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+};
+
+void txgbe_set_rx_function(struct rte_eth_dev *dev);
+
uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
uint64_t txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
uint64_t txgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
--
2.18.4