From: Wenbo Cao <caowenbo@mucse.com>
To: thomas@monjalon.net, Wenbo Cao <caowenbo@mucse.com>
Cc: stephen@networkplumber.org, dev@dpdk.org, ferruh.yigit@amd.com,
andrew.rybchenko@oktetlabs.ru, yaojun@mucse.com
Subject: [PATCH v7 10/28] net/rnp: add support for device start/stop operations
Date: Sat, 8 Feb 2025 10:43:47 +0800
Message-ID: <1738982645-34550-11-git-send-email-caowenbo@mucse.com>
In-Reply-To: <1738982645-34550-1-git-send-email-caowenbo@mucse.com>
Add basic support for the device start and stop operations: check that the
hardware clock is valid, set up the DMA scatter buffer size, start/stop all
configured Rx/Tx queues, and enable/disable the MAC Rx/Tx path.
Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
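Note for reviewers (not part of the commit): a minimal usage sketch of how an
application exercises the new dev_start/dev_stop callbacks through the
standard ethdev API, assuming a single port with one Rx/Tx queue pair, a
default rte_eth_conf and a hypothetical "mb_pool" mempool name; error-return
checks are omitted for brevity.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>
    #include <rte_lcore.h>

    struct rte_eth_conf conf = { 0 };   /* default port configuration */
    struct rte_mempool *mp;
    uint16_t port_id = 0;

    /* mbuf pool for the Rx queue */
    mp = rte_pktmbuf_pool_create("mb_pool", 4096, 256, 0,
                                 RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    rte_eth_dev_configure(port_id, 1, 1, &conf);
    rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL, mp);
    rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL);

    /* invokes rnp_dev_start(): clock check, scatter DMA setup,
     * queue start, MAC Rx/Tx enable
     */
    rte_eth_dev_start(port_id);

    /* ... Rx/Tx burst loop ... */

    /* invokes rnp_dev_stop(): queues stopped, MAC Rx/Tx disabled */
    rte_eth_dev_stop(port_id);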
drivers/net/rnp/base/rnp_common.c | 22 +++
drivers/net/rnp/base/rnp_common.h | 1 +
drivers/net/rnp/base/rnp_dma_regs.h | 10 ++
drivers/net/rnp/base/rnp_eth_regs.h | 5 +
drivers/net/rnp/base/rnp_hw.h | 1 +
drivers/net/rnp/base/rnp_mac.h | 14 ++
drivers/net/rnp/base/rnp_mac_regs.h | 42 ++++++
drivers/net/rnp/rnp.h | 3 +
drivers/net/rnp/rnp_ethdev.c | 274 +++++++++++++++++++++++++++++++++++-
9 files changed, 371 insertions(+), 1 deletion(-)
diff --git a/drivers/net/rnp/base/rnp_common.c b/drivers/net/rnp/base/rnp_common.c
index 7d1f96c..38a3f55 100644
--- a/drivers/net/rnp/base/rnp_common.c
+++ b/drivers/net/rnp/base/rnp_common.c
@@ -79,3 +79,25 @@ int rnp_init_hw(struct rnp_hw *hw)
return 0;
}
+
+int rnp_clock_valid_check(struct rnp_hw *hw, u16 nr_lane)
+{
+ uint16_t timeout = 0;
+
+ do {
+ RNP_E_REG_WR(hw, RNP_RSS_REDIR_TB(nr_lane, 0), 0x7f);
+ udelay(10);
+ timeout++;
+ if (timeout >= 1000)
+ break;
+ } while (RNP_E_REG_RD(hw, RNP_RSS_REDIR_TB(nr_lane, 0)) != 0x7f);
+
+ if (timeout >= 1000) {
+ RNP_PMD_ERR("ethernet[%d] eth reg can't be write", nr_lane);
+ return -EPERM;
+ }
+ /* clear the dirty value */
+ RNP_E_REG_WR(hw, RNP_RSS_REDIR_TB(nr_lane, 0), 0);
+
+ return 0;
+}
diff --git a/drivers/net/rnp/base/rnp_common.h b/drivers/net/rnp/base/rnp_common.h
index bd00708..958fcb6 100644
--- a/drivers/net/rnp/base/rnp_common.h
+++ b/drivers/net/rnp/base/rnp_common.h
@@ -12,5 +12,6 @@
((macaddr[4] << 8)) | (macaddr[5]))
int rnp_init_hw(struct rnp_hw *hw);
int rnp_setup_common_ops(struct rnp_hw *hw);
+int rnp_clock_valid_check(struct rnp_hw *hw, u16 nr_lane);
#endif /* _RNP_COMMON_H_ */
diff --git a/drivers/net/rnp/base/rnp_dma_regs.h b/drivers/net/rnp/base/rnp_dma_regs.h
index 3664c0a..32e738a 100644
--- a/drivers/net/rnp/base/rnp_dma_regs.h
+++ b/drivers/net/rnp/base/rnp_dma_regs.h
@@ -6,9 +6,19 @@
#define _RNP_DMA_REGS_H_
#define RNP_DMA_VERSION (0)
+#define RNP_DMA_CTRL (0x4)
+/* 1 unit <-> 16 bytes of dma buffer size */
+#define RNP_DMA_SCATTER_MEM_MASK RTE_GENMASK32(31, 16)
+#define RNP_DMA_SCATTER_MEN_S (16)
+#define RNP_DMA_RX_MEM_PAD_EN RTE_BIT32(8)
+#define RNP_DMA_VEB_BYPASS RTE_BIT32(4)
+#define RNP_DMA_TXRX_LOOP RTE_BIT32(1)
+#define RNP_DMA_TXMRX_LOOP RTE_BIT32(0)
+
#define RNP_DMA_HW_EN (0x10)
#define RNP_DMA_EN_ALL (0b1111)
#define RNP_DMA_HW_STATE (0x14)
+
/* --- queue register --- */
/* queue enable */
#define RNP_RXQ_START(qid) _RING_(0x0010 + 0x100 * (qid))
diff --git a/drivers/net/rnp/base/rnp_eth_regs.h b/drivers/net/rnp/base/rnp_eth_regs.h
index 10e3d95..60766d2 100644
--- a/drivers/net/rnp/base/rnp_eth_regs.h
+++ b/drivers/net/rnp/base/rnp_eth_regs.h
@@ -10,6 +10,9 @@
#define RNP_E_FILTER_EN _ETH_(0x801c)
#define RNP_E_REDIR_EN _ETH_(0x8030)
+#define RNP_RX_ETH_F_CTRL(n) _ETH_(0x8070 + ((n) * 0x8))
+#define RNP_RX_ETH_F_OFF (0x7ff)
+#define RNP_RX_ETH_F_ON (0x270)
/* rx queue flow ctrl */
#define RNP_RX_FC_ENABLE _ETH_(0x8520)
#define RNP_RING_FC_EN(n) _ETH_(0x8524 + ((0x4) * ((n) / 32)))
@@ -28,6 +31,8 @@
#define RNP_MAC_HASH_MASK RTE_GENMASK32(11, 0)
#define RNP_MAC_MULTICASE_TBL_EN RTE_BIT32(2)
#define RNP_MAC_UNICASE_TBL_EN RTE_BIT32(3)
+/* rss function ctrl */
+#define RNP_RSS_REDIR_TB(n, id) _ETH_(0xe000 + ((n) * 0x200) + ((id) * 0x4))
#define RNP_TC_PORT_OFFSET(lane) _ETH_(0xe840 + 0x04 * (lane))
diff --git a/drivers/net/rnp/base/rnp_hw.h b/drivers/net/rnp/base/rnp_hw.h
index 4f5a73e..ed1e7eb 100644
--- a/drivers/net/rnp/base/rnp_hw.h
+++ b/drivers/net/rnp/base/rnp_hw.h
@@ -120,6 +120,7 @@ struct rnp_hw {
bool lane_is_sgmii[RNP_MAX_PORT_OF_PF];
struct rnp_mbx_info mbx;
struct rnp_fw_info fw_info;
+ u16 min_dma_size;
spinlock_t rxq_reset_lock;
spinlock_t txq_reset_lock;
diff --git a/drivers/net/rnp/base/rnp_mac.h b/drivers/net/rnp/base/rnp_mac.h
index 57cbd9e..1dac903 100644
--- a/drivers/net/rnp/base/rnp_mac.h
+++ b/drivers/net/rnp/base/rnp_mac.h
@@ -7,6 +7,20 @@
#include "rnp_osdep.h"
#include "rnp_hw.h"
+#include "rnp_eth_regs.h"
+
+#define RNP_RX_ETH_DISABLE(hw, nr_lane) do { \
+ wmb(); \
+ RNP_E_REG_WR(hw, RNP_RX_ETH_F_CTRL(nr_lane), \
+ RNP_RX_ETH_F_OFF); \
+} while (0)
+
+#define RNP_RX_ETH_ENABLE(hw, nr_lane) do { \
+ wmb(); \
+ RNP_E_REG_WR(hw, RNP_RX_ETH_F_CTRL(nr_lane), \
+ RNP_RX_ETH_F_ON); \
+} while (0)
+
void rnp_mac_ops_init(struct rnp_hw *hw);
int rnp_get_mac_addr(struct rnp_eth_port *port, u8 *mac);
diff --git a/drivers/net/rnp/base/rnp_mac_regs.h b/drivers/net/rnp/base/rnp_mac_regs.h
index 1dc0668..1ae8801 100644
--- a/drivers/net/rnp/base/rnp_mac_regs.h
+++ b/drivers/net/rnp/base/rnp_mac_regs.h
@@ -7,6 +7,41 @@
#define RNP_MAC_BASE_OFFSET(n) (_MAC_(0) + ((0x10000) * (n)))
+#define RNP_MAC_TX_CFG (0x0)
+/* Transmitter Enable */
+#define RNP_MAC_TE RTE_BIT32(0)
+/* Jabber Disable */
+#define RNP_MAC_JD RTE_BIT32(16)
+#define RNP_SPEED_SEL_MASK RTE_GENMASK32(30, 28)
+#define RNP_SPEED_SEL_S (28)
+#define RNP_SPEED_SEL_1G (0b111 << RNP_SPEED_SEL_S)
+#define RNP_SPEED_SEL_10G (0b010 << RNP_SPEED_SEL_S)
+#define RNP_SPEED_SEL_40G (0b000 << RNP_SPEED_SEL_S)
+
+#define RNP_MAC_RX_CFG (0x4)
+/* Receiver Enable */
+#define RNP_MAC_RE RTE_BIT32(0)
+/* Automatic Pad or CRC Stripping */
+#define RNP_MAC_ACS RTE_BIT32(1)
+/* CRC stripping for Type packets */
+#define RNP_MAC_CST RTE_BIT32(2)
+/* Disable CRC Check */
+#define RNP_MAC_DCRCC RTE_BIT32(3)
+/* Enable Max Frame Size Limit */
+#define RNP_MAC_GPSLCE RTE_BIT32(6)
+/* Watchdog Disable */
+#define RNP_MAC_WD RTE_BIT32(7)
+/* Jumbo Packet Support En */
+#define RNP_MAC_JE RTE_BIT32(8)
+/* Enable IPC */
+#define RNP_MAC_IPC RTE_BIT32(9)
+/* Loopback Mode */
+#define RNP_MAC_LM RTE_BIT32(10)
+/* Giant Packet Size Limit */
+#define RNP_MAC_GPSL_MASK RTE_GENMASK32(29, 16)
+#define RNP_MAC_MAX_GPSL (1518)
+#define RNP_MAC_CPSL_SHIFT (16)
+
#define RNP_MAC_PKT_FLT_CTRL (0x8)
/* Receive All */
#define RNP_MAC_RA RTE_BIT32(31)
@@ -35,5 +70,12 @@
#define RNP_MAC_HPF RTE_BIT32(10)
#define RNP_MAC_VTFE RTE_BIT32(16)
+#define RNP_MAC_VFE RTE_BIT32(16)
+/* mac link ctrl */
+#define RNP_MAC_LPI_CTRL (0xd0)
+/* PHY Link Status Disable */
+#define RNP_MAC_PLSDIS RTE_BIT32(18)
+/* PHY Link Status */
+#define RNP_MAC_PLS RTE_BIT32(17)
#endif /* _RNP_MAC_REGS_H_ */
diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h
index ab7bd60..086135a 100644
--- a/drivers/net/rnp/rnp.h
+++ b/drivers/net/rnp/rnp.h
@@ -107,6 +107,9 @@ struct rnp_eth_port {
struct rnp_port_attr attr;
struct rnp_tx_queue *tx_queues[RNP_MAX_RX_QUEUE_NUM];
struct rnp_hw *hw;
+
+ rte_spinlock_t rx_mac_lock;
+ bool port_stopped;
};
struct rnp_eth_adapter {
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index d5e5ef7..7b7ed8c 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -14,6 +14,8 @@
#include "base/rnp_mbx_fw.h"
#include "base/rnp_mac.h"
#include "base/rnp_common.h"
+#include "base/rnp_dma_regs.h"
+#include "base/rnp_mac_regs.h"
#include "rnp_rxtx.h"
static struct rte_eth_dev *
@@ -52,9 +54,275 @@ static void rnp_dev_interrupt_handler(void *param)
RTE_SET_USED(param);
}
+static void rnp_mac_rx_enable(struct rte_eth_dev *dev)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ uint16_t lane = port->attr.nr_lane;
+ struct rnp_hw *hw = port->hw;
+ uint32_t mac_cfg;
+
+ rte_spinlock_lock(&port->rx_mac_lock);
+ mac_cfg = RNP_MAC_REG_RD(hw, lane, RNP_MAC_RX_CFG);
+ mac_cfg |= RNP_MAC_RE;
+
+ mac_cfg &= ~RNP_MAC_GPSL_MASK;
+ mac_cfg |= (RNP_MAC_MAX_GPSL << RNP_MAC_CPSL_SHIFT);
+ RNP_MAC_REG_WR(hw, lane, RNP_MAC_RX_CFG, mac_cfg);
+ rte_spinlock_unlock(&port->rx_mac_lock);
+}
+
+static void rnp_mac_rx_disable(struct rte_eth_dev *dev)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ uint16_t lane = port->attr.nr_lane;
+ struct rnp_hw *hw = port->hw;
+ uint32_t mac_cfg;
+
+ /* protect the shared hw resource from concurrent access */
+ rte_spinlock_lock(&port->rx_mac_lock);
+ mac_cfg = RNP_MAC_REG_RD(hw, lane, RNP_MAC_RX_CFG);
+ mac_cfg &= ~RNP_MAC_RE;
+
+ RNP_MAC_REG_WR(hw, lane, RNP_MAC_RX_CFG, mac_cfg);
+ rte_spinlock_unlock(&port->rx_mac_lock);
+}
+
+static void rnp_mac_tx_enable(struct rte_eth_dev *dev)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ uint16_t lane = port->attr.nr_lane;
+ struct rnp_hw *hw = port->hw;
+ uint32_t mac_cfg;
+
+ mac_cfg = RNP_MAC_REG_RD(hw, lane, RNP_MAC_TX_CFG);
+ mac_cfg |= RNP_MAC_TE;
+ RNP_MAC_REG_WR(hw, lane, RNP_MAC_TX_CFG, mac_cfg);
+}
+
+static void rnp_mac_tx_disable(struct rte_eth_dev *dev)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ uint16_t lane = port->attr.nr_lane;
+ struct rnp_hw *hw = port->hw;
+ uint32_t ctrl;
+
+ /* must wait until the tx side has finished sending
+ * before disabling the tx side
+ */
+ ctrl = RNP_MAC_REG_RD(hw, lane, RNP_MAC_TX_CFG);
+ ctrl &= ~RNP_MAC_TE;
+ RNP_MAC_REG_WR(hw, lane, RNP_MAC_TX_CFG, ctrl);
+}
+
+static void rnp_mac_init(struct rte_eth_dev *dev)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ uint16_t lane = port->attr.nr_lane;
+ struct rnp_hw *hw = port->hw;
+ uint32_t mac_cfg;
+
+ rnp_mac_tx_enable(dev);
+ rnp_mac_rx_enable(dev);
+
+ mac_cfg = RNP_MAC_REG_RD(hw, lane, RNP_MAC_LPI_CTRL);
+ mac_cfg |= RNP_MAC_PLSDIS | RNP_MAC_PLS;
+ RNP_MAC_REG_WR(hw, lane, RNP_MAC_LPI_CTRL, mac_cfg);
+}
+
+static int
+rnp_rx_scattered_setup(struct rte_eth_dev *dev)
+{
+ uint16_t max_pkt_size =
+ dev->data->dev_conf.rxmode.mtu + RNP_ETH_OVERHEAD;
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+ struct rnp_hw *hw = port->hw;
+ struct rnp_rx_queue *rxq;
+ uint16_t dma_buf_size;
+ uint16_t queue_id;
+ uint32_t dma_ctrl;
+
+ if (dev->data->rx_queues == NULL)
+ return -ENOMEM;
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
+ rxq = dev->data->rx_queues[queue_id];
+ if (!rxq)
+ continue;
+ if (hw->min_dma_size == 0)
+ hw->min_dma_size = rxq->rx_buf_len;
+ else
+ hw->min_dma_size = RTE_MIN(hw->min_dma_size,
+ rxq->rx_buf_len);
+ }
+ if (hw->min_dma_size < RNP_MIN_DMA_BUF_SIZE) {
+ RNP_PMD_ERR("port[%d] scatter dma len is not support %d",
+ dev->data->port_id, hw->min_dma_size);
+ return -ENOTSUP;
+ }
+ dma_buf_size = hw->min_dma_size;
+ /* Setup max dma scatter engine split size */
+ dma_ctrl = RNP_E_REG_RD(hw, RNP_DMA_CTRL);
+ if (max_pkt_size == dma_buf_size)
+ dma_buf_size += (dma_buf_size % 16);
+ RNP_PMD_INFO("PF[%d] MaxPktLen %d MbSize %d MbHeadRoom %d\n",
+ hw->mbx.pf_num, max_pkt_size,
+ dma_buf_size, RTE_PKTMBUF_HEADROOM);
+ dma_ctrl &= ~RNP_DMA_SCATTER_MEM_MASK;
+ dma_ctrl |= ((dma_buf_size / 16) << RNP_DMA_SCATTER_MEN_S);
+ RNP_E_REG_WR(hw, RNP_DMA_CTRL, dma_ctrl);
+
+ return 0;
+}
+
+static int rnp_enable_all_rx_queue(struct rte_eth_dev *dev)
+{
+ struct rnp_rx_queue *rxq;
+ uint16_t idx;
+ int ret = 0;
+
+ for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+ rxq = dev->data->rx_queues[idx];
+ if (!rxq || rxq->rx_deferred_start)
+ continue;
+ if (dev->data->rx_queue_state[idx] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ ret = rnp_rx_queue_start(dev, idx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int rnp_enable_all_tx_queue(struct rte_eth_dev *dev)
+{
+ struct rnp_tx_queue *txq;
+ uint16_t idx;
+ int ret = 0;
+
+ for (idx = 0; idx < dev->data->nb_tx_queues; idx++) {
+ txq = dev->data->tx_queues[idx];
+ if (!txq || txq->tx_deferred_start)
+ continue;
+ if (dev->data->tx_queue_state[idx] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ ret = rnp_tx_queue_start(dev, idx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int rnp_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rnp_hw *hw = port->hw;
+ uint16_t lane = 0;
+ uint16_t idx = 0;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ lane = port->attr.nr_lane;
+ ret = rnp_clock_valid_check(hw, lane);
+ if (ret) {
+ RNP_PMD_ERR("port[%d] function[%d] lane[%d] hw clock error",
+ data->port_id, hw->mbx.pf_num, lane);
+ return ret;
+ }
+ /* disable eth rx flow */
+ RNP_RX_ETH_DISABLE(hw, lane);
+ ret = rnp_rx_scattered_setup(eth_dev);
+ if (ret)
+ return ret;
+ ret = rnp_enable_all_tx_queue(eth_dev);
+ if (ret)
+ goto txq_start_failed;
+ ret = rnp_enable_all_rx_queue(eth_dev);
+ if (ret)
+ goto rxq_start_failed;
+ rnp_mac_init(eth_dev);
+ /* enable eth rx flow */
+ RNP_RX_ETH_ENABLE(hw, lane);
+ port->port_stopped = 0;
+
+ return 0;
+rxq_start_failed:
+ for (idx = 0; idx < data->nb_rx_queues; idx++)
+ rnp_rx_queue_stop(eth_dev, idx);
+txq_start_failed:
+ for (idx = 0; idx < data->nb_tx_queues; idx++)
+ rnp_tx_queue_stop(eth_dev, idx);
+
+ return ret;
+}
+
+static int rnp_disable_all_rx_queue(struct rte_eth_dev *dev)
+{
+ struct rnp_rx_queue *rxq;
+ uint16_t idx;
+ int ret = 0;
+
+ for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+ rxq = dev->data->rx_queues[idx];
+ if (!rxq || rxq->rx_deferred_start)
+ continue;
+ if (dev->data->rx_queue_state[idx] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ ret = rnp_rx_queue_stop(dev, idx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int rnp_disable_all_tx_queue(struct rte_eth_dev *dev)
+{
+ struct rnp_tx_queue *txq;
+ uint16_t idx;
+ int ret = 0;
+
+ for (idx = 0; idx < dev->data->nb_tx_queues; idx++) {
+ txq = dev->data->tx_queues[idx];
+ if (!txq || txq->tx_deferred_start)
+ continue;
+ if (dev->data->tx_queue_state[idx] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ ret = rnp_tx_queue_stop(dev, idx);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int rnp_dev_stop(struct rte_eth_dev *eth_dev)
{
- RTE_SET_USED(eth_dev);
+ struct rnp_eth_port *port = RNP_DEV_TO_PORT(eth_dev);
+ struct rte_eth_link link;
+
+ if (port->port_stopped)
+ return 0;
+ eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+ eth_dev->tx_pkt_prepare = rte_eth_pkt_burst_dummy;
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(eth_dev, &link);
+
+ rnp_disable_all_tx_queue(eth_dev);
+ rnp_disable_all_rx_queue(eth_dev);
+ rnp_mac_tx_disable(eth_dev);
+ rnp_mac_rx_disable(eth_dev);
+
+ eth_dev->data->dev_started = 0;
+ port->port_stopped = 1;
return 0;
}
@@ -230,6 +498,7 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
/* Features supported by this driver */
static const struct eth_dev_ops rnp_eth_dev_ops = {
.dev_close = rnp_dev_close,
+ .dev_start = rnp_dev_start,
.dev_stop = rnp_dev_stop,
.dev_infos_get = rnp_dev_infos_get,
@@ -313,6 +582,7 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
}
rte_ether_addr_copy(&port->mac_addr, ð_dev->data->mac_addrs[0]);
+ rte_spinlock_init(&port->rx_mac_lock);
adapter->ports[p_id] = port;
adapter->inited_ports++;
@@ -445,6 +715,8 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
ret = rnp_init_port_resource(adapter, sub_eth_dev, name, p_id);
if (ret)
goto eth_alloc_error;
+ rnp_mac_rx_disable(sub_eth_dev);
+ rnp_mac_tx_disable(sub_eth_dev);
if (p_id) {
/* port 0 will be probe by plaform */
rte_eth_dev_probing_finish(sub_eth_dev);
--
1.8.3.1