From: Apeksha Gupta <apeksha.gupta@nxp.com>
To: david.marchand@redhat.com, ferruh.yigit@intel.com,
andrew.rybchenko@oktetlabs.ru
Cc: dev@dpdk.org, sachin.saxena@nxp.com, hemant.agrawal@nxp.com,
Apeksha Gupta <apeksha.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH v8 3/5] net/enetfec: support queue configuration
Date: Tue, 9 Nov 2021 17:04:30 +0530
Message-ID: <20211109113432.11876-4-apeksha.gupta@nxp.com>
In-Reply-To: <20211109113432.11876-1-apeksha.gupta@nxp.com>
This patch adds the Rx/Tx queue setup and configuration operations.
On packet reception, the respective BD ring status bit is set; it is
later checked during packet processing.
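For context, these operations are exercised through the standard ethdev
API. A minimal application-side sketch (port_id and mb_pool are
hypothetical; error handling omitted):

	/* Configure one Rx and one Tx queue on an ENETFEC port. Assumes
	 * port_id is bound to this PMD and mb_pool was created with
	 * rte_pktmbuf_pool_create().
	 */
	struct rte_eth_conf port_conf = { 0 };
	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL, mb_pool);
	rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL);
	rte_eth_dev_start(port_id);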
Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
drivers/net/enetfec/enet_ethdev.c | 222 +++++++++++++++++++++++++++++-
drivers/net/enetfec/enet_ethdev.h | 74 ++++++++++
2 files changed, 295 insertions(+), 1 deletion(-)
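Note for reviewers: the BD ring helpers assume the descriptor size is a
power of two, so a descriptor pointer maps to a ring index with a shift
rather than a division. A standalone sketch of that arithmetic (values
chosen for illustration only):

	/* With dsize a power of two, d_size_log2 = floor(log2(dsize)), and
	 * (ptr - base) >> d_size_log2 recovers the descriptor index.
	 */
	unsigned int dsize = 32;	/* e.g. sizeof(struct bufdesc_ex) */
	unsigned int dsize_log2 = 31 - __builtin_clz(dsize);	/* == 5 */
	uintptr_t base = 0x1000;
	uintptr_t bdp = base + 3 * dsize;
	unsigned int idx = (unsigned int)((bdp - base) >> dsize_log2);	/* == 3 */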
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index fe6b5e539f..f70489ff91 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -41,6 +41,11 @@
#define NUM_OF_BD_QUEUES 6
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ RTE_ETH_RX_OFFLOAD_CHECKSUM |
+ RTE_ETH_RX_OFFLOAD_VLAN;
+
/*
* This function is called to start or restart the ENETFEC during a link
* change, transmit timeout, or to reconfigure the ENETFEC. The network
@@ -186,10 +191,225 @@ enetfec_eth_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
+ dev_info->max_rx_queues = ENETFEC_MAX_Q;
+ dev_info->max_tx_queues = ENETFEC_MAX_Q;
+ dev_info->rx_offload_capa = dev_rx_offloads_sup;
+ return 0;
+}
+
+static const unsigned short offset_des_active_rxq[] = {
+ ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+ ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
+};
+
+static int
+enetfec_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ unsigned int i;
+ struct bufdesc *bdp, *bd_base;
+ struct enetfec_priv_tx_q *txq;
+ unsigned int size;
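+ /* Descriptor size (and its log2, for index math) depends on whether extended BDs are in use */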
+ unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+ sizeof(struct bufdesc);
+ unsigned int dsize_log2 = fls64(dsize);
+
+ /* Tx deferred start is not supported */
+ if (tx_conf->tx_deferred_start) {
+ ENETFEC_PMD_ERR("%p:Tx deferred start not supported",
+ (void *)dev);
+ return -EINVAL;
+ }
+
+ /* allocate transmit queue */
+ txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
+ if (txq == NULL) {
+ ENETFEC_PMD_ERR("transmit queue allocation failed");
+ return -ENOMEM;
+ }
+
+ if (nb_desc > MAX_TX_BD_RING_SIZE) {
+ nb_desc = MAX_TX_BD_RING_SIZE;
+ ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");
+ }
+ txq->bd.ring_size = nb_desc;
+ fep->total_tx_ring_size += txq->bd.ring_size;
+ fep->tx_queues[queue_idx] = txq;
+
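+ /* Program the Tx BD ring base (physical address) into the TD_START register */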
+ rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));
+
+ /* Set transmit descriptor base. */
+ txq = fep->tx_queues[queue_idx];
+ txq->fep = fep;
+ size = dsize * txq->bd.ring_size;
+ bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
+ txq->bd.queue_id = queue_idx;
+ txq->bd.base = bd_base;
+ txq->bd.cur = bd_base;
+ txq->bd.d_size = dsize;
+ txq->bd.d_size_log2 = dsize_log2;
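+ /* TDAR register address: a write here tells the hardware new Tx BDs are ready */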
+ txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
+ offset_des_active_txq[queue_idx];
+ bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
+ txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
+ bdp = txq->bd.base; /* equal to txq->bd.cur at this point */
+
+ for (i = 0; i < txq->bd.ring_size; i++) {
+ /* Initialize the BD for every fragment in the page. */
+ rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
+ if (txq->tx_mbuf[i] != NULL) {
+ rte_pktmbuf_free(txq->tx_mbuf[i]);
+ txq->tx_mbuf[i] = NULL;
+ }
+ rte_write32(0, &bdp->bd_bufaddr);
+ bdp = enet_get_nextdesc(bdp, &txq->bd);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = enet_get_prevdesc(bdp, &txq->bd);
+ rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
+ rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
+ txq->dirty_tx = bdp;
+ dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
+ return 0;
+}
+
+static int
+enetfec_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ unsigned int i;
+ struct bufdesc *bd_base;
+ struct bufdesc *bdp;
+ struct enetfec_priv_rx_q *rxq;
+ unsigned int size;
+ unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+ sizeof(struct bufdesc);
+ unsigned int dsize_log2 = fls64(dsize);
+
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ ENETFEC_PMD_ERR("%p:Rx deferred start not supported",
+ (void *)dev);
+ return -EINVAL;
+ }
+
+ /* allocate receive queue */
+ rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL) {
+ ENETFEC_PMD_ERR("receive queue allocation failed");
+ return -ENOMEM;
+ }
+
+ if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
+ nb_rx_desc = MAX_RX_BD_RING_SIZE;
+ ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE");
+ }
+
+ rxq->bd.ring_size = nb_rx_desc;
+ fep->total_rx_ring_size += rxq->bd.ring_size;
+ fep->rx_queues[queue_idx] = rxq;
+
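+ /* Program the Rx BD ring base and the maximum receive buffer size */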
+ rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
+ rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));
+
+ /* Set receive descriptor base. */
+ rxq = fep->rx_queues[queue_idx];
+ rxq->pool = mb_pool;
+ size = dsize * rxq->bd.ring_size;
+ bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
+ rxq->bd.queue_id = queue_idx;
+ rxq->bd.base = bd_base;
+ rxq->bd.cur = bd_base;
+ rxq->bd.d_size = dsize;
+ rxq->bd.d_size_log2 = dsize_log2;
+ rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
+ offset_des_active_rxq[queue_idx];
+ bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
+ rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
+
+ rxq->fep = fep;
+ bdp = rxq->bd.base;
+ rxq->bd.cur = bdp;
+
+ for (i = 0; i < nb_rx_desc; i++) {
+ /* Initialize Rx buffers from pktmbuf pool */
+ struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
+ if (mbuf == NULL) {
+ ENETFEC_PMD_ERR("mbuf failed");
+ goto err_alloc;
+ }
+
+ /* Write the mbuf's IOVA into the descriptor's buffer address field */
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+ &bdp->bd_bufaddr);
+
+ rxq->rx_mbuf[i] = mbuf;
+ rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
+
+ bdp = enet_get_nextdesc(bdp, &rxq->bd);
+ }
+
+ /* Initialize the receive buffer descriptors. */
+ bdp = rxq->bd.cur;
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ /* Initialize the BD for every fragment in the page. */
+ if (rte_read32(&bdp->bd_bufaddr) > 0)
+ rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
+ &bdp->bd_sc);
+ else
+ rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
+
+ bdp = enet_get_nextdesc(bdp, &rxq->bd);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = enet_get_prevdesc(bdp, &rxq->bd);
+ rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
+ rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
+ dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
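+ /* Any write to RDAR tells the hardware that empty Rx BDs are available */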
+ rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
+ return 0;
+
+err_alloc:
+ for (i = 0; i < nb_rx_desc; i++) {
+ if (rxq->rx_mbuf[i] != NULL) {
+ rte_pktmbuf_free(rxq->rx_mbuf[i]);
+ rxq->rx_mbuf[i] = NULL;
+ }
+ }
+ rte_free(rxq);
+ return -ENOMEM;
+}
+
static const struct eth_dev_ops enetfec_ops = {
.dev_configure = enetfec_eth_configure,
.dev_start = enetfec_eth_start,
- .dev_stop = enetfec_eth_stop
+ .dev_stop = enetfec_eth_stop,
+ .dev_infos_get = enetfec_eth_info,
+ .rx_queue_setup = enetfec_rx_queue_setup,
+ .tx_queue_setup = enetfec_tx_queue_setup
};
static int
diff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h
index 0f0684ab11..babc7190fb 100644
--- a/drivers/net/enetfec/enet_ethdev.h
+++ b/drivers/net/enetfec/enet_ethdev.h
@@ -10,8 +10,13 @@
/* full duplex */
#define FULL_DUPLEX 0x00
+#define MAX_TX_BD_RING_SIZE 512 /* must be a power of 2 */
+#define MAX_RX_BD_RING_SIZE 512 /* must be a power of 2 */
#define PKT_MAX_BUF_SIZE 1984
#define OPT_FRAME_SIZE (PKT_MAX_BUF_SIZE << 16)
+#define ENETFEC_MAX_RX_PKT_LEN 3000
+
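+/* Expands to nothing; kept as a kernel-style marker for MMIO pointers */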
+#define __iomem
/*
 * ENETFEC can support 1 Rx and 1 Tx queue.
@@ -22,6 +27,49 @@
#define writel(v, p) ({*(volatile unsigned int *)(p) = (v); })
#define readl(p) rte_read32(p)
+struct bufdesc {
+ uint16_t bd_datlen; /* buffer data length */
+ uint16_t bd_sc; /* buffer control & status */
+ uint32_t bd_bufaddr; /* buffer address */
+};
+
+struct bufdesc_ex {
+ struct bufdesc desc;
+ uint32_t bd_esc;
+ uint32_t bd_prot;
+ uint32_t bd_bdu;
+ uint32_t ts;
+ uint16_t res0[4];
+};
+
+struct bufdesc_prop {
+ int queue_id;
+ /* Addresses of Tx and Rx buffers */
+ struct bufdesc *base;
+ struct bufdesc *last;
+ struct bufdesc *cur;
+ void __iomem *active_reg_desc;
+ uint64_t descr_baseaddr_p;
+ unsigned short ring_size;
+ unsigned char d_size;
+ unsigned char d_size_log2;
+};
+
+struct enetfec_priv_tx_q {
+ struct bufdesc_prop bd;
+ struct rte_mbuf *tx_mbuf[MAX_TX_BD_RING_SIZE];
+ struct bufdesc *dirty_tx;
+ struct rte_mempool *pool;
+ struct enetfec_private *fep;
+};
+
+struct enetfec_priv_rx_q {
+ struct bufdesc_prop bd;
+ struct rte_mbuf *rx_mbuf[MAX_RX_BD_RING_SIZE];
+ struct rte_mempool *pool;
+ struct enetfec_private *fep;
+};
+
struct enetfec_private {
struct rte_eth_dev *dev;
struct rte_eth_stats stats;
@@ -54,4 +102,30 @@ struct enetfec_private {
struct enetfec_priv_tx_q *tx_queues[ENETFEC_MAX_Q];
};
+static inline struct bufdesc *
+enet_get_nextdesc(struct bufdesc *bdp, struct bufdesc_prop *bd)
+{
+ return (bdp >= bd->last) ? bd->base
+ : (struct bufdesc *)(((uintptr_t)bdp) + bd->d_size);
+}
+
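+/* Returns floor(log2(word)) for nonzero word (0-based, unlike the
+ * 1-based kernel fls64); used to derive d_size_log2.
+ */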
+static inline int
+fls64(unsigned long word)
+{
+ return (64 - __builtin_clzl(word)) - 1;
+}
+
+static inline struct bufdesc *
+enet_get_prevdesc(struct bufdesc *bdp, struct bufdesc_prop *bd)
+{
+ return (bdp <= bd->base) ? bd->last
+ : (struct bufdesc *)(((uintptr_t)bdp) - bd->d_size);
+}
+
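+/* Map a BD pointer to its ring index via a shift by d_size_log2 */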
+static inline int
+enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)
+{
+ return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
+}
+
#endif /*__ENETFEC_ETHDEV_H__*/
--
2.17.1