DPDK patches and discussions
From: Apeksha Gupta <apeksha.gupta@nxp.com>
To: stephen@networkplumber.org, ferruh.yigit@intel.com
Cc: david.marchand@redhat.com, andrew.rybchenko@oktetlabs.ru,
	dev@dpdk.org, sachin.saxena@nxp.com, hemant.agrawal@nxp.com,
	Apeksha Gupta <apeksha.gupta@nxp.com>
Subject: [PATCH v11 4/5] net/enetfec: add Rx/Tx support
Date: Mon, 15 Nov 2021 12:49:39 +0530	[thread overview]
Message-ID: <20211115071940.12942-5-apeksha.gupta@nxp.com> (raw)
In-Reply-To: <20211115071940.12942-1-apeksha.gupta@nxp.com>

This patch adds burst enqueue and dequeue operations to the enetfec
PMD. It also adds basic features such as promiscuous mode enable and
basic statistics.
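
For illustration only (not part of this patch), below is a minimal sketch of
how an application reaches the new handlers through the generic ethdev API;
the helper name, the burst size and the assumption that the port is already
configured and started are illustrative:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32   /* arbitrary burst size for the example */

    /* Hypothetical helper: forward one burst on an already started port. */
    static void
    enetfec_example_burst(uint16_t port_id)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        struct rte_eth_stats st;
        uint16_t nb_rx, nb_tx;

        /* Dispatched to enetfec_promiscuous_enable(). */
        rte_eth_promiscuous_enable(port_id);

        /*
         * ENETFEC exposes a single Rx/Tx queue pair, so the queue id is 0;
         * these calls end up in enetfec_recv_pkts()/enetfec_xmit_pkts().
         */
        nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
        nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

        /* Packets the Tx ring could not accept stay with the caller. */
        while (nb_tx < nb_rx)
            rte_pktmbuf_free(pkts[nb_tx++]);

        /* Served from the driver's software counters via enetfec_stats_get(). */
        rte_eth_stats_get(port_id, &st);
    }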

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 doc/guides/nics/enetfec.rst          |   2 +
 doc/guides/nics/features/enetfec.ini |   2 +
 drivers/net/enetfec/enet_ethdev.c    | 182 ++++++++++++++++++++++
 drivers/net/enetfec/enet_ethdev.h    |  25 +++
 drivers/net/enetfec/enet_rxtx.c      | 220 +++++++++++++++++++++++++++
 drivers/net/enetfec/meson.build      |   4 +-
 6 files changed, 434 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/enetfec/enet_rxtx.c

diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst
index 6a86295e34..209073e77c 100644
--- a/doc/guides/nics/enetfec.rst
+++ b/doc/guides/nics/enetfec.rst
@@ -84,6 +84,8 @@ driver.
 ENETFEC Features
 ~~~~~~~~~~~~~~~~~
 
+- Basic stats
+- Promiscuous
 - Linux
 - ARMv8
 
diff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini
index bdfbdbd9d4..3d8aa5b627 100644
--- a/doc/guides/nics/features/enetfec.ini
+++ b/doc/guides/nics/features/enetfec.ini
@@ -4,6 +4,8 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Promiscuous mode     = Y
+Basic stats	     = Y
 Linux		     = Y
 ARMv8		     = Y
 Usage doc	     = Y
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index 0b8b73615d..9ac7501043 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -39,6 +39,8 @@
 #define ENETFEC_RAFL_V			0x8
 #define ENETFEC_OPD_V			0xFFF0
 
+/* Extended buffer descriptor */
+#define ENETFEC_EXTENDED_BD		0
 #define NUM_OF_BD_QUEUES		6
 
 /* Supported Rx offloads */
@@ -152,6 +154,38 @@ enetfec_restart(struct rte_eth_dev *dev)
 	rte_delay_us(10);
 }
 
+static void
+enet_free_buffers(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i, q;
+	struct rte_mbuf *mbuf;
+	struct bufdesc  *bdp;
+	struct enetfec_priv_rx_q *rxq;
+	struct enetfec_priv_tx_q *txq;
+
+	for (q = 0; q < dev->data->nb_rx_queues; q++) {
+		rxq = fep->rx_queues[q];
+		bdp = rxq->bd.base;
+		for (i = 0; i < rxq->bd.ring_size; i++) {
+			mbuf = rxq->rx_mbuf[i];
+			rxq->rx_mbuf[i] = NULL;
+			rte_pktmbuf_free(mbuf);
+			bdp = enet_get_nextdesc(bdp, &rxq->bd);
+		}
+	}
+
+	for (q = 0; q < dev->data->nb_tx_queues; q++) {
+		txq = fep->tx_queues[q];
+		bdp = txq->bd.base;
+		for (i = 0; i < txq->bd.ring_size; i++) {
+			mbuf = txq->tx_mbuf[i];
+			txq->tx_mbuf[i] = NULL;
+			rte_pktmbuf_free(mbuf);
+		}
+	}
+}
+
 static int
 enetfec_eth_configure(struct rte_eth_dev *dev)
 {
@@ -165,6 +199,8 @@ static int
 enetfec_eth_start(struct rte_eth_dev *dev)
 {
 	enetfec_restart(dev);
+	dev->rx_pkt_burst = &enetfec_recv_pkts;
+	dev->tx_pkt_burst = &enetfec_xmit_pkts;
 
 	return 0;
 }
@@ -191,6 +227,101 @@ enetfec_eth_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+enetfec_eth_close(struct rte_eth_dev *dev)
+{
+	enet_free_buffers(dev);
+	return 0;
+}
+
+static int
+enetfec_eth_link_update(struct rte_eth_dev *dev,
+			int wait_to_complete __rte_unused)
+{
+	struct rte_eth_link link;
+	unsigned int lstatus = 1;
+
+	memset(&link, 0, sizeof(struct rte_eth_link));
+
+	link.link_status = lstatus;
+	link.link_speed = RTE_ETH_SPEED_NUM_1G;
+
+	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+			 "Up");
+
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+enetfec_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	uint32_t tmp;
+
+	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+	tmp |= 0x8;
+	tmp &= ~0x2;
+	rte_write32(rte_cpu_to_le_32(tmp),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+
+	return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
+	rte_write32(rte_cpu_to_le_32(0xffffffff),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+	rte_write32(rte_cpu_to_le_32(0xffffffff),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+	dev->data->all_multicast = 1;
+
+	rte_write32(rte_cpu_to_le_32(0x04400002),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+	rte_write32(rte_cpu_to_le_32(0x10800049),
+			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+
+	return 0;
+}
+
+/* Set a MAC change in hardware. */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+		    struct rte_ether_addr *addr)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
+	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
+	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
+
+	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+	return 0;
+}
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+	      struct rte_eth_stats *stats)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	struct rte_eth_stats *eth_stats = &fep->stats;
+
+	stats->ipackets = eth_stats->ipackets;
+	stats->ibytes = eth_stats->ibytes;
+	stats->ierrors = eth_stats->ierrors;
+	stats->opackets = eth_stats->opackets;
+	stats->obytes = eth_stats->obytes;
+	stats->oerrors = eth_stats->oerrors;
+	stats->rx_nombuf = eth_stats->rx_nombuf;
+
+	return 0;
+}
+
 static int
 enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
 	struct rte_eth_dev_info *dev_info)
@@ -202,6 +333,18 @@ enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+static void
+enet_free_queue(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		rte_free(fep->rx_queues[i]);
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		rte_free(fep->tx_queues[i]);
+}
+
 static const unsigned short offset_des_active_rxq[] = {
 	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
 };
@@ -405,6 +548,12 @@ static const struct eth_dev_ops enetfec_ops = {
 	.dev_configure          = enetfec_eth_configure,
 	.dev_start              = enetfec_eth_start,
 	.dev_stop               = enetfec_eth_stop,
+	.dev_close              = enetfec_eth_close,
+	.link_update            = enetfec_eth_link_update,
+	.promiscuous_enable     = enetfec_promiscuous_enable,
+	.allmulticast_enable    = enetfec_multicast_enable,
+	.mac_addr_set           = enetfec_set_mac_address,
+	.stats_get              = enetfec_stats_get,
 	.dev_infos_get          = enetfec_eth_info,
 	.rx_queue_setup         = enetfec_rx_queue_setup,
 	.tx_queue_setup         = enetfec_tx_queue_setup
@@ -430,6 +579,9 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 	int rc;
 	int i;
 	unsigned int bdsize;
+	struct rte_ether_addr macaddr = {
+		.addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
+	};
 
 	name = rte_vdev_device_name(vdev);
 	ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);
@@ -470,6 +622,21 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 		fep->bd_addr_p = fep->bd_addr_p + bdsize;
 	}
 
+	/* Copy the station address into the dev structure. */
+	dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
+	if (dev->data->mac_addrs == NULL) {
+		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+			RTE_ETHER_ADDR_LEN);
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	/*
+	 * Set default mac address
+	 */
+	enetfec_set_mac_address(dev, &macaddr);
+
+	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
 	rc = enetfec_eth_init(dev);
 	if (rc)
 		goto failed_init;
@@ -478,6 +645,8 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 
 failed_init:
 	ENETFEC_PMD_ERR("Failed to init");
+err:
+	rte_eth_dev_release_port(dev);
 	return rc;
 }
 
@@ -485,6 +654,8 @@ static int
 pmd_enetfec_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_eth_dev *eth_dev = NULL;
+	struct enetfec_private *fep;
+	struct enetfec_priv_rx_q *rxq;
 	int ret;
 
 	/* find the ethdev entry */
@@ -492,11 +663,22 @@ pmd_enetfec_remove(struct rte_vdev_device *vdev)
 	if (eth_dev == NULL)
 		return -ENODEV;
 
+	fep = eth_dev->data->dev_private;
+	/* Free descriptor base of first RX queue as it was configured
+	 * first in enetfec_eth_init().
+	 */
+	rxq = fep->rx_queues[0];
+	rte_free(rxq->bd.base);
+	enet_free_queue(eth_dev);
+	enetfec_eth_stop(eth_dev);
+
 	ret = rte_eth_dev_release_port(eth_dev);
 	if (ret != 0)
 		return -EINVAL;
 
 	ENETFEC_PMD_INFO("Release enetfec sw device");
+	enetfec_cleanup(fep);
+
 	return 0;
 }
 
diff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h
index 27e124c339..06a6c10600 100644
--- a/drivers/net/enetfec/enet_ethdev.h
+++ b/drivers/net/enetfec/enet_ethdev.h
@@ -7,6 +7,10 @@
 
 #include <rte_ethdev.h>
 
+#define BD_LEN			49152
+#define ENETFEC_TX_FR_SIZE	2048
+#define ETH_HLEN		RTE_ETHER_HDR_LEN
+
 /* full duplex */
 #define FULL_DUPLEX		0x00
 
@@ -17,6 +21,21 @@
 #define ENETFEC_MAX_RX_PKT_LEN	3000
 
 #define __iomem
+#if defined(RTE_ARCH_ARM)
+#if defined(RTE_ARCH_64)
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+
+#else /* RTE_ARCH_32 */
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
+
+#else
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
+
 /*
  * ENETFEC can support 1 rx and tx queue..
  */
@@ -71,6 +90,7 @@ struct enetfec_priv_rx_q {
 
 struct enetfec_private {
 	struct rte_eth_dev	*dev;
+	struct rte_eth_stats	stats;
 	int			full_duplex;
 	int			flag_pause;
 	uint32_t		quirks;
@@ -123,4 +143,9 @@ enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)
 	return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
 }
 
+uint16_t enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+uint16_t enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
 #endif /*__ENETFEC_ETHDEV_H__*/
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644
index 0000000000..e61a217dcb
--- /dev/null
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <signal.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
+/* This function performs the Rx queue processing: it dequeues packets from
+ * the Rx ring and, while walking the ring, marks each descriptor empty again.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct rte_mempool *pool;
+	struct bufdesc *bdp;
+	struct rte_mbuf *mbuf, *new_mbuf = NULL;
+	unsigned short status;
+	unsigned short pkt_len;
+	int pkt_received = 0, index = 0;
+	void *data;
+	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
+	struct rte_eth_stats *stats = &rxq->fep->stats;
+	pool = rxq->pool;
+	bdp = rxq->bd.cur;
+
+	/* Process the incoming packet */
+	status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	while ((status & RX_BD_EMPTY) == 0) {
+		if (pkt_received >= nb_pkts)
+			break;
+
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(new_mbuf == NULL)) {
+			stats->rx_nombuf++;
+			break;
+		}
+		/* Check for errors. */
+		status ^= RX_BD_LAST;
+		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+			RX_BD_TR)) {
+			stats->ierrors++;
+			if (status & RX_BD_OV) {
+				/* FIFO overrun */
+				/* enet_dump_rx(rxq); */
+				ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
+				goto rx_processing_done;
+			}
+			if (status & (RX_BD_LG | RX_BD_SH
+						| RX_BD_LAST)) {
+				/* Frame too long or too short. */
+				ENETFEC_DP_LOG(DEBUG, "rx_length_error");
+				if (status & RX_BD_LAST)
+					ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
+			}
+			if (status & RX_BD_CR) {     /* CRC Error */
+				ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
+			}
+			/* Report late collisions as a frame error. */
+			if (status & (RX_BD_NO | RX_BD_TR))
+				ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
+			goto rx_processing_done;
+		}
+
+		/* Process the incoming frame. */
+		stats->ipackets++;
+		pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+		stats->ibytes += pkt_len;
+
+		/* Data in the mbuf is located at its data_off offset. */
+		index = enet_get_bd_index(bdp, &rxq->bd);
+		mbuf = rxq->rx_mbuf[index];
+
+		data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+		rte_prefetch0(data);
+		rte_pktmbuf_append((struct rte_mbuf *)mbuf,
+				pkt_len - 4);
+
+		if (rxq->fep->quirks & QUIRK_RACC)
+			data = rte_pktmbuf_adj(mbuf, 2);
+
+		rx_pkts[pkt_received] = mbuf;
+		pkt_received++;
+		rxq->rx_mbuf[index] = new_mbuf;
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+				&bdp->bd_bufaddr);
+rx_processing_done:
+		/* Once processing of this buffer is done, clear its
+		 * status flags.
+		 */
+		status &= ~RX_BD_STATS;
+
+		/* Mark the buffer empty */
+		status |= RX_BD_EMPTY;
+
+		if (rxq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+				    &ebdp->bd_esc);
+			rte_write32(0, &ebdp->bd_prot);
+			rte_write32(0, &ebdp->bd_bdu);
+		}
+
+		/* Make sure the updates to the rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Update BD pointer to next entry */
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.
+		 */
+		rte_write32(0, rxq->bd.active_reg_desc);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	}
+	rxq->bd.cur = bdp;
+	return pkt_received;
+}
+
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct enetfec_priv_tx_q *txq  =
+			(struct enetfec_priv_tx_q *)tx_queue;
+	struct rte_eth_stats *stats = &txq->fep->stats;
+	struct bufdesc *bdp, *last_bdp;
+	struct rte_mbuf *mbuf;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned int index, estatus = 0;
+	unsigned int i, pkt_transmitted = 0;
+	uint8_t *data;
+	int tx_st = 1;
+
+	while (tx_st) {
+		if (pkt_transmitted >= nb_pkts) {
+			tx_st = 0;
+			break;
+		}
+		bdp = txq->bd.cur;
+		/* First clean the ring */
+		index = enet_get_bd_index(bdp, &txq->bd);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+		if (status & TX_BD_READY) {
+			stats->oerrors++;
+			break;
+		}
+		if (txq->tx_mbuf[index]) {
+			rte_pktmbuf_free(txq->tx_mbuf[index]);
+			txq->tx_mbuf[index] = NULL;
+		}
+
+		mbuf = *(tx_pkts);
+		tx_pkts++;
+
+		/* Fill in a Tx ring entry */
+		last_bdp = bdp;
+		status &= ~TX_BD_STATS;
+
+		/* Set buffer length and buffer pointer */
+		buflen = rte_pktmbuf_pkt_len(mbuf);
+		stats->opackets++;
+		stats->obytes += buflen;
+
+		if (mbuf->nb_segs > 1) {
+			ENETFEC_DP_LOG(DEBUG, "SG not supported");
+			return pkt_transmitted;
+		}
+		status |= (TX_BD_LAST);
+		data = rte_pktmbuf_mtod(mbuf, void *);
+		for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+			dcbf(data + i);
+
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			    &bdp->bd_bufaddr);
+		rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+		if (txq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(0, &ebdp->bd_bdu);
+			rte_write32(rte_cpu_to_le_32(estatus),
+				    &ebdp->bd_esc);
+		}
+
+		index = enet_get_bd_index(last_bdp, &txq->bd);
+		/* Save mbuf pointer */
+		txq->tx_mbuf[index] = mbuf;
+
+		/* Make sure the updates to the rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		status |= (TX_BD_READY | TX_BD_TC);
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Trigger transmission start */
+		rte_write32(0, txq->bd.active_reg_desc);
+		pkt_transmitted++;
+
+		/* If this was the last BD in the ring, start at the
+		 * beginning again.
+		 */
+		bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+		/* Make sure the updates to bdp and tx_mbuf are performed
+		 * before txq->bd.cur is updated.
+		 */
+		txq->bd.cur = bdp;
+	}
+	return pkt_transmitted;
+}
diff --git a/drivers/net/enetfec/meson.build b/drivers/net/enetfec/meson.build
index 3fb0f73071..551cd5358c 100644
--- a/drivers/net/enetfec/meson.build
+++ b/drivers/net/enetfec/meson.build
@@ -8,4 +8,6 @@ endif
 
 sources = files(
         'enet_ethdev.c',
-        'enet_uio.c')
+        'enet_uio.c',
+        'enet_rxtx.c'
+)
-- 
2.17.1

