From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v4 41/58] net/txgbe: add VMDq configure
Date: Mon, 19 Oct 2020 16:53:58 +0800
Message-ID: <20201019085415.82207-42-jiawenwu@trustnetic.com>
In-Reply-To: <20201019085415.82207-1-jiawenwu@trustnetic.com>
Add multi-queue Rx and Tx configuration with VMDq.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
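Note for reviewers (not part of the patch): the new multi-queue configure
paths are driven entirely by the application-supplied rte_eth_conf. As a
rough sketch of the configuration this patch consumes -- using the
20.11-era ethdev names (ETH_MQ_RX_VMDQ_ONLY, struct rte_eth_vmdq_rx_conf)
and illustrative port/VLAN/pool values -- an application might request
VMDq like this:

    #include <string.h>
    #include <rte_ethdev.h>

    /* Sketch only: request VMDq Rx with 16 pools and VMDq Tx on an
     * already-probed port. The VLAN id, pool bitmap and queue counts are
     * illustrative; per-queue setup and error handling are omitted.
     */
    static int
    setup_vmdq(uint16_t port_id)
    {
        struct rte_eth_conf conf;
        struct rte_eth_vmdq_rx_conf *vmdq;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
        conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;

        vmdq = &conf.rx_adv_conf.vmdq_rx_conf;
        vmdq->nb_queue_pools = ETH_16_POOLS;
        vmdq->enable_default_pool = 0;   /* takes the DEFDSA path in POOLCTL */
        /* becomes TXGBE_POOLETHCTL_UTA | TXGBE_POOLETHCTL_BCA per pool */
        vmdq->rx_mode = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;
        vmdq->nb_pool_maps = 1;
        vmdq->pool_map[0].vlan_id = 100; /* written to TXGBE_PSRVLAN */
        vmdq->pool_map[0].pools = 0x1;   /* pool 0 only, via PSRVLANPLM */

        return rte_eth_dev_configure(port_id, 16, 16, &conf);
    }

How each field is consumed on the driver side is visible in
txgbe_vmdq_rx_hw_configure() and txgbe_dev_mq_tx_configure() below.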
doc/guides/nics/features/txgbe.ini | 1 +
drivers/net/txgbe/txgbe_ethdev.c | 35 ++++
drivers/net/txgbe/txgbe_ethdev.h | 2 +
drivers/net/txgbe/txgbe_rxtx.c | 260 +++++++++++++++++++++++++++++
4 files changed, 298 insertions(+)
diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 022e56d45..578ec05b0 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -15,6 +15,7 @@ LRO = Y
TSO = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
+VMDq = Y
SR-IOV = Y
VLAN filter = Y
Rate limitation = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index c60df26c8..e14980a2e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -943,6 +943,17 @@ txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return 0;
}
+static void
+txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ /* VLANCTL: enable vlan filtering and allow all vlan tags through */
+ uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
+
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+}
+
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
@@ -1330,6 +1341,11 @@ txgbe_dev_start(struct rte_eth_dev *dev)
goto error;
}
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable vlan filtering for VMDq */
+ txgbe_vmdq_vlan_hw_filter_enable(dev);
+ }
+
/* Restore vf rate limit */
if (vfinfo != NULL) {
for (vf = 0; vf < pci_dev->max_vfs; vf++)
@@ -2765,6 +2781,25 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
return 0;
}
+uint32_t
+txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+ uint32_t new_val = orig_val;
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ new_val |= TXGBE_POOLETHCTL_UTA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ new_val |= TXGBE_POOLETHCTL_MCHA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ new_val |= TXGBE_POOLETHCTL_UCHA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ new_val |= TXGBE_POOLETHCTL_BCA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ new_val |= TXGBE_POOLETHCTL_MCP;
+
+ return new_val;
+}
+
static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index ba9cab718..bf13e0378 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -256,6 +256,8 @@ void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
+uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index fd6a3f436..c8ca9e2d0 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -2555,6 +2555,145 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
dev->data->nb_tx_queues = 0;
}
+static void
+txgbe_rss_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
+}
+
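+/* 128 x 32-bit VLANTBL registers cover all 4096 VLAN IDs */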
+#define NUM_VFTA_REGISTERS 128
+
+/*
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_rx_conf *cfg;
+ struct txgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, vlanctrl;
+ uint32_t vmolr = 0;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+ num_pools = cfg->nb_queue_pools;
+
+ txgbe_rss_disable(dev);
+
+ /* enable vmdq */
+ mrqc = TXGBE_PORTCTL_NUMVT_64;
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+ /* turn on virtualisation and set the default pool */
+ vt_ctl = TXGBE_POOLCTL_RPLEN;
+ if (cfg->enable_default_pool)
+ vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+ else
+ vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+
+ wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
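+ /* apply the same rx_mode accept policy to every pool */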
+ for (i = 0; i < (int)num_pools; i++) {
+ vmolr = txgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+ wr32(hw, TXGBE_POOLETHCTL(i), vmolr);
+ }
+
+ /* enable vlan filtering and allow all vlan tags through */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ wr32(hw, TXGBE_VLANTBL(i), UINT32_MAX);
+
+ /* enable receive for pools 0-31, and for 32-63 when 64 pools are used */
+ wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
+ if (num_pools == ETH_64_POOLS)
+ wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
+
+ /*
+ * Allow pools to read specific MAC addresses.
+ * In this case, all pools should be able to read from MAC address 0.
+ */
+ wr32(hw, TXGBE_ETHADDRIDX, 0);
+ wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+ wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+ /* set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set the vlan id in the PSRVLAN register and set the valid bit */
+ wr32(hw, TXGBE_PSRVLANIDX, i);
+ wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+ TXGBE_PSRVLAN_VID(cfg->pool_map[i].vlan_id)));
+ /*
+ * Put the allowed pools in the PSRVLANPLM pool-list registers:
+ * pools 0-31 map to PLM(0) and pools 32-63 to PLM(1).
+ */
+ if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+ wr32(hw, TXGBE_PSRVLANPLM(0),
+ (cfg->pool_map[i].pools & UINT32_MAX));
+ else
+ wr32(hw, TXGBE_PSRVLANPLM(1),
+ ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
+ }
+
+ /* Tx General Switch Control enables VMDq loopback */
+ if (cfg->enable_loop_back) {
+ wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
+ for (i = 0; i < 64; i++)
+ wr32m(hw, TXGBE_POOLETHCTL(i),
+ TXGBE_POOLETHCTL_LLB, TXGBE_POOLETHCTL_LLB);
+ }
+
+ txgbe_flush(hw);
+}
+
+/*
+ * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+txgbe_vmdq_tx_hw_configure(struct txgbe_hw *hw)
+{
+ uint32_t reg;
+ uint32_t q;
+
+ PMD_INIT_FUNC_TRACE();
+ /* PF/VF Transmit Enable */
+ wr32(hw, TXGBE_POOLTXENA(0), UINT32_MAX);
+ wr32(hw, TXGBE_POOLTXENA(1), UINT32_MAX);
+
+ /* Disable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg |= TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
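+ /* run in 64-pool mode, matching the Rx VMDq configuration */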
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK,
+ TXGBE_PORTCTL_NUMVT_64);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < 128; q++) {
+ u32 val = 1 << (q % 32);
+ wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+ }
+
+ /* Enable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg &= ~TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ txgbe_flush(hw);
+}
+
static int __rte_cold
txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
{
@@ -2587,6 +2726,119 @@ txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
return 0;
}
+static int
+txgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t mrqc;
+
+ mrqc = rd32(hw, TXGBE_PORTCTL);
+ mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_64;
+ break;
+
+ case ETH_32_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_32;
+ break;
+
+ case ETH_16_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_16;
+ break;
+ default:
+ PMD_INIT_LOG(ERR,
+ "invalid pool number in IOV mode");
+ return 0;
+ }
+
+ wr32(hw, TXGBE_PORTCTL, mrqc);
+
+ return 0;
+}
+
+static int
+txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * VMDq multi-queue setting
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_ONLY:
+ txgbe_vmdq_rx_hw_configure(dev);
+ break;
+
+ case ETH_MQ_RX_NONE:
+ default:
+ /* if mq_mode is none, disable RSS mode. */
+ txgbe_rss_disable(dev);
+ break;
+ }
+ } else {
+ /* SRIOV active scheme */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ default:
+ txgbe_config_vf_default(dev);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t mtqc;
+ uint32_t rttdcs;
+
+ /* disable arbiter */
+ rttdcs = rd32(hw, TXGBE_ARBTXCTL);
+ rttdcs |= TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme:
+ * VMDq multi-queue Tx setting, without DCB
+ */
+ if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+ txgbe_vmdq_tx_hw_configure(hw);
+ else
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
+ } else {
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ /*
+ * SRIOV active scheme
+ * FIXME: add support for DCB together with VMDq and SR-IOV
+ */
+ case ETH_64_POOLS:
+ mtqc = TXGBE_PORTCTL_NUMVT_64;
+ break;
+ case ETH_32_POOLS:
+ mtqc = TXGBE_PORTCTL_NUMVT_32;
+ break;
+ case ETH_16_POOLS:
+ mtqc = TXGBE_PORTCTL_NUMVT_16;
+ break;
+ default:
+ mtqc = 0;
+ PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+ }
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mtqc);
+ }
+
+ /* re-enable arbiter */
+ rttdcs &= ~TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+ return 0;
+}
+
/**
* txgbe_get_rscctl_maxdesc
*
@@ -2919,6 +3171,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
+ /*
+ * Device configured with multiple RX queues.
+ */
+ txgbe_dev_mq_rx_configure(dev);
+
/*
* Setup the Checksum Register.
* Disable Full-Packet Checksum which is mutually exclusive with RSS.
@@ -2980,6 +3237,9 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev)
wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
}
+
+ /* Device configured with multiple TX queues. */
+ txgbe_dev_mq_tx_configure(dev);
}
/*
--
2.18.4