DPDK patches and discussions
From: Apeksha Gupta <apeksha.gupta@nxp.com>
To: ferruh.yigit@intel.com
Cc: dev@dpdk.org, hemant.agrawal@nxp.com, sachin.saxena@nxp.com,
	Apeksha Gupta <apeksha.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH 4/4] drivers/net/enetfec: add enqueue and dequeue support
Date: Fri, 30 Apr 2021 10:04:24 +0530	[thread overview]
Message-ID: <20210430043424.19752-5-apeksha.gupta@nxp.com> (raw)
In-Reply-To: <20210430043424.19752-1-apeksha.gupta@nxp.com>

This patch adds checksum offload support and burst enqueue and dequeue
operations to the enetfec PMD.

Loopback mode is also added; the compile-time flag 'ENETFEC_LOOPBACK'
enables this feature, and it is disabled by default. Basic features such
as promiscuous mode enable and basic statistics are added as well.
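
For context, here is a minimal sketch (not part of this patch) of how an
application exercises these burst hooks through the generic ethdev API;
the port id, queue id and burst size are illustrative and error handling
is omitted:

  #include <rte_ethdev.h>
  #include <rte_mbuf.h>

  static void
  echo_burst(uint16_t port_id)
  {
  	struct rte_mbuf *pkts[32];
  	uint16_t nb_rx, nb_tx, i;

  	/* Dequeue up to 32 frames (serviced by enetfec_recv_pkts) */
  	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
  	/* Enqueue them back out (serviced by enetfec_xmit_pkts) */
  	nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
  	/* Free anything the Tx ring did not accept */
  	for (i = nb_tx; i < nb_rx; i++)
  		rte_pktmbuf_free(pkts[i]);
  }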

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
 doc/guides/nics/enetfec.rst          |   4 +
 doc/guides/nics/features/enetfec.ini |   5 +
 drivers/net/enetfec/enet_ethdev.c    | 212 +++++++++++-
 drivers/net/enetfec/enet_rxtx.c      | 499 +++++++++++++++++++++++++++
 4 files changed, 719 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/enetfec/enet_rxtx.c

diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst
index 10f495fb9..adbb52392 100644
--- a/doc/guides/nics/enetfec.rst
+++ b/doc/guides/nics/enetfec.rst
@@ -75,6 +75,10 @@ ENETFEC driver.
 ENETFEC Features
 ~~~~~~~~~~~~~~~~~
 
+- Basic stats
+- Promiscuous
+- VLAN offload
+- L3/L4 checksum offload
 - ARMv8
 
 Supported ENETFEC SoCs
diff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini
index 570069798..fcc217773 100644
--- a/doc/guides/nics/features/enetfec.ini
+++ b/doc/guides/nics/features/enetfec.ini
@@ -4,5 +4,10 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Basic stats          = Y
+Promiscuous mode     = Y
+VLAN offload         = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index b4816179a..ca2cf929f 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -46,6 +46,9 @@
 #define ENET_ENET_OPD_V		0xFFF0
 #define ENET_MDIO_PM_TIMEOUT	100 /* ms */
 
+/* Extended buffer descriptor */
+#define ENETFEC_EXTENDED_BD	0
+
 int enetfec_logtype_pmd;
 
 /* Supported Rx offloads */
@@ -61,6 +64,50 @@ static uint64_t dev_tx_offloads_sup =
 		DEV_TX_OFFLOAD_UDP_CKSUM |
 		DEV_TX_OFFLOAD_TCP_CKSUM;
 
+static void enet_free_buffers(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i, q;
+	struct rte_mbuf *mbuf;
+	struct bufdesc  *bdp;
+	struct enetfec_priv_rx_q *rxq;
+	struct enetfec_priv_tx_q *txq;
+
+	for (q = 0; q < dev->data->nb_rx_queues; q++) {
+		rxq = fep->rx_queues[q];
+		bdp = rxq->bd.base;
+		for (i = 0; i < rxq->bd.ring_size; i++) {
+			mbuf = rxq->rx_mbuf[i];
+			rxq->rx_mbuf[i] = NULL;
+			if (mbuf)
+				rte_pktmbuf_free(mbuf);
+			bdp = enet_get_nextdesc(bdp, &rxq->bd);
+		}
+	}
+
+	for (q = 0; q < dev->data->nb_tx_queues; q++) {
+		txq = fep->tx_queues[q];
+		bdp = txq->bd.base;
+		for (i = 0; i < txq->bd.ring_size; i++) {
+			mbuf = txq->tx_mbuf[i];
+			txq->tx_mbuf[i] = NULL;
+			if (mbuf)
+				rte_pktmbuf_free(mbuf);
+		}
+	}
+}
+
+static void enet_free_queue(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		rte_free(fep->rx_queues[i]);
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		rte_free(fep->tx_queues[i]);
+}
+
 /*
  * This function is called to start or restart the FEC during a link
  * change, transmit timeout or to reconfigure the FEC. The network
@@ -189,7 +236,6 @@ enetfec_eth_open(struct rte_eth_dev *dev)
 	return 0;
 }
 
-
 static int
 enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
 {
@@ -395,12 +441,137 @@ enetfec_rx_queue_setup(struct rte_eth_dev *dev,
 	return -1;
 }
 
+static int
+enetfec_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	uint32_t tmp;
+
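+	/* Set the PROM bit (0x8) and clear bit 0x2 in the Rx control
+	 * register, mirroring the Linux FEC driver's promiscuous setup.
+	 */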
+	tmp = rte_read32(fep->hw_baseaddr_v + ENET_RCR);
+	tmp |= 0x8;
+	tmp &= ~0x2;
+	rte_write32(tmp, fep->hw_baseaddr_v + ENET_RCR);
+
+	return 0;
+}
+
+static int
+enetfec_eth_link_update(__rte_unused struct rte_eth_dev *dev,
+			__rte_unused int wait_to_complete)
+{
+	return 0;
+}
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+	      struct rte_eth_stats *stats)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	struct rte_eth_stats *eth_stats = &fep->stats;
+
+	if (stats == NULL)
+		return -1;
+
+	memset(stats, 0, sizeof(struct rte_eth_stats));
+
+	stats->ipackets = eth_stats->ipackets;
+	stats->ibytes = eth_stats->ibytes;
+	stats->ierrors = eth_stats->ierrors;
+	stats->opackets = eth_stats->opackets;
+	stats->obytes = eth_stats->obytes;
+	stats->oerrors = eth_stats->oerrors;
+
+	return 0;
+}
+
+static void
+enetfec_stop(__rte_unused struct rte_eth_dev *dev)
+{
+/*TODO*/
+}
+
+static int
+enetfec_eth_close(struct rte_eth_dev *dev)
+{
+	/* phy_stop(ndev->phydev); */
+	enetfec_stop(dev);
+	/* phy_disconnect(ndev->phydev); */
+
+	enet_free_buffers(dev);
+	return 0;
+}
+
+static uint16_t
+enetfec_dummy_xmit_pkts(__rte_unused void *tx_queue,
+		__rte_unused struct rte_mbuf **tx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+static uint16_t
+enetfec_dummy_recv_pkts(__rte_unused void *rxq,
+		__rte_unused struct rte_mbuf **rx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+static int
+enetfec_eth_stop(struct rte_eth_dev *dev)
+{
+	dev->rx_pkt_burst = &enetfec_dummy_recv_pkts;
+	dev->tx_pkt_burst = &enetfec_dummy_xmit_pkts;
+
+	return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
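+	/* Open up the group address hash (GAUR/GALR) so every multicast
+	 * address is accepted.
+	 */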
+	rte_write32(0xffffffff, fep->hw_baseaddr_v + ENET_GAUR);
+	rte_write32(0xffffffff, fep->hw_baseaddr_v + ENET_GALR);
+	dev->data->all_multicast = 1;
+
+	return 0;
+}
+
+/* Program a new MAC address into the hardware. */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+		    struct rte_ether_addr *addr)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
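+	/* PALR holds address bytes 0-3; PAUR bits 31:16 hold bytes 4-5 */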
+	rte_write32(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+		fep->hw_baseaddr_v + ENET_PALR);
+	rte_write32((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+		fep->hw_baseaddr_v + ENET_PAUR);
+
+	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+	return 0;
+}
+
 static const struct eth_dev_ops ops = {
 	.dev_start = enetfec_eth_open,
+	.dev_stop = enetfec_eth_stop,
+	.dev_close = enetfec_eth_close,
 	.dev_configure = enetfec_eth_configure,
 	.dev_infos_get = enetfec_eth_info,
 	.rx_queue_setup = enetfec_rx_queue_setup,
 	.tx_queue_setup = enetfec_tx_queue_setup,
+	.link_update = enetfec_eth_link_update,
+	.mac_addr_set = enetfec_set_mac_address,
+	.stats_get = enetfec_stats_get,
+	.promiscuous_enable = enetfec_promiscuous_enable,
+	.allmulticast_enable = enetfec_multicast_enable,
 };
 
 static int
@@ -434,6 +605,7 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 	struct enetfec_private *fep;
 	const char *name;
 	int rc = -1;
+	struct rte_ether_addr macaddr;
 	int i;
 	unsigned int bdsize;
 
@@ -474,12 +646,37 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 		fep->bd_addr_p = fep->bd_addr_p + bdsize;
 	}
 
+	/* Copy the station address into the device structure. */
+	dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+	if (dev->data->mac_addrs == NULL) {
+		ENET_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+			ETHER_ADDR_LEN);
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	/* TODO get mac address from device tree or get random addr.
+	 * Currently setting default as 1,1,1,1,1,1
+	 */
+	macaddr.addr_bytes[0] = 1;
+	macaddr.addr_bytes[1] = 1;
+	macaddr.addr_bytes[2] = 1;
+	macaddr.addr_bytes[3] = 1;
+	macaddr.addr_bytes[4] = 1;
+	macaddr.addr_bytes[5] = 1;
+
+	enetfec_set_mac_address(dev, &macaddr);
+	/* Select the extended buffer descriptor mode (disabled by default) */
+	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
+
 	rc = enetfec_eth_init(dev);
 	if (rc)
 		goto failed_init;
 	return 0;
 failed_init:
 	ENET_PMD_ERR("Failed to init");
+err:
+	rte_eth_dev_release_port(dev);
 	return rc;
 }
 
@@ -487,15 +684,28 @@ static int
 pmd_enetfec_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_eth_dev *eth_dev = NULL;
+	struct enetfec_private *fep;
+	struct enetfec_priv_rx_q *rxq;
 
 	/* find the ethdev entry */
 	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
 	if (!eth_dev)
 		return -ENODEV;
 
+	fep = eth_dev->data->dev_private;
+	/* Free descriptor base of first RX queue as it was configured
+	 * first in enetfec_eth_init().
+	 */
+	rxq = fep->rx_queues[0];
+	rte_free(rxq->bd.base);
+	enet_free_queue(eth_dev);
+
+	enetfec_eth_stop(eth_dev);
+	/* Unmap the register region before the private data is freed */
+	munmap(fep->hw_baseaddr_v, fep->cbus_size);
 	rte_eth_dev_release_port(eth_dev);
 
 	ENET_PMD_INFO("Closing sw device\n");
+
 	return 0;
 }
 
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644
index 000000000..1b9b86c35
--- /dev/null
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -0,0 +1,499 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2020 NXP
+ */
+
+#include <signal.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
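+/* Build-time switches: set ENETFEC_LOOPBACK to 1 to exercise the loopback
+ * path, ENETFEC_DUMP to 1 to compile the ring-dump debug helpers.
+ */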
+#define ENETFEC_LOOPBACK	0
+#define ENETFEC_DUMP		0
+
+static volatile bool lb_quit;
+
+#if ENETFEC_DUMP
+static void
+enet_dump(struct enetfec_priv_tx_q *txq)
+{
+	struct bufdesc *bdp;
+	int index = 0;
+
+	ENET_PMD_DEBUG("TX ring dump\n");
+	ENET_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
+
+	bdp = txq->bd.base;
+	do {
+		ENET_PMD_DEBUG("%3u %c%c 0x%04x 0x%08x %4u %p\n",
+			index,
+			bdp == txq->bd.cur ? 'S' : ' ',
+			bdp == txq->dirty_tx ? 'H' : ' ',
+			rte_read16(rte_le_to_cpu_16(&bdp->bd_sc)),
+			rte_read32(rte_le_to_cpu_32(&bdp->bd_bufaddr)),
+			rte_read16(rte_le_to_cpu_16(&bdp->bd_datlen)),
+			txq->tx_mbuf[index]);
+		bdp = enet_get_nextdesc(bdp, &txq->bd);
+		index++;
+	} while (bdp != txq->bd.base);
+}
+
+static void
+enet_dump_rx(struct enetfec_priv_rx_q *rxq)
+{
+	struct bufdesc *bdp;
+	int index = 0;
+
+	ENET_PMD_DEBUG("RX ring dump\n");
+	ENET_PMD_DEBUG("Nr     SC     addr       len  MBUF\n");
+
+	bdp = rxq->bd.base;
+	do {
+		ENET_PMD_DEBUG("%3u %c 0x%04x 0x%08x %4u %p\n",
+			index,
+			bdp == rxq->bd.cur ? 'S' : ' ',
+			rte_read16(rte_le_to_cpu_16(&bdp->bd_sc)),
+			rte_read32(rte_le_to_cpu_32(&bdp->bd_bufaddr)),
+			rte_read16(rte_le_to_cpu_16(&bdp->bd_datlen)),
+			rxq->rx_mbuf[index]);
+		rte_pktmbuf_dump(stdout, rxq->rx_mbuf[index],
+				rxq->rx_mbuf[index]->pkt_len);
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+		index++;
+	} while (bdp != rxq->bd.base);
+}
+#endif
+
+#if ENETFEC_LOOPBACK
+static void fec_signal_handler(int signum)
+{
+	if (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {
+		printf("\n\n %s: Signal %d received, preparing to exit...\n",
+				__func__, signum);
+		lb_quit = true;
+	}
+}
+
+static void
+enetfec_lb_rxtx(void *rxq1)
+{
+	struct rte_mempool *pool;
+	struct bufdesc *rx_bdp = NULL, *tx_bdp = NULL;
+	struct rte_mbuf *mbuf = NULL, *new_mbuf = NULL;
+	unsigned short status;
+	unsigned short pkt_len = 0;
+	int index_r = 0, index_t = 0;
+	u8 *data;
+	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
+	struct rte_eth_stats *stats = &rxq->fep->stats;
+	unsigned int i;
+	struct enetfec_private *fep;
+	struct enetfec_priv_tx_q *txq;
+	fep = rxq->fep->dev->data->dev_private;
+	txq = fep->tx_queues[0];
+
+	pool = rxq->pool;
+	rx_bdp = rxq->bd.cur;
+	tx_bdp = txq->bd.cur;
+
+	signal(SIGTSTP, fec_signal_handler);
+	while (!lb_quit) {
+chk_again:
+		status = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_sc));
+		if (status & RX_BD_EMPTY) {
+			if (!lb_quit)
+				goto chk_again;
+			rxq->bd.cur = rx_bdp;
+			txq->bd.cur = tx_bdp;
+			return;
+		}
+
+		/* Check for errors. */
+		status ^= RX_BD_LAST;
+		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+			RX_BD_TR)) {
+			stats->ierrors++;
+			if (status & RX_BD_OV) {
+				/* FIFO overrun */
+				ENET_PMD_ERR("rx_fifo_error\n");
+				goto rx_processing_done;
+			}
+			if (status & (RX_BD_LG | RX_BD_SH
+						| RX_BD_LAST)) {
+				/* Frame too long or too short. */
+				ENET_PMD_ERR("rx_length_error\n");
+				if (status & RX_BD_LAST)
+					ENET_PMD_ERR("rcv is not +last\n");
+			}
+			/* CRC Error */
+			if (status & RX_BD_CR)
+				ENET_PMD_ERR("rx_crc_errors\n");
+
+			/* Report late collisions as a frame error. */
+			if (status & (RX_BD_NO | RX_BD_TR))
+				ENET_PMD_ERR("rx_frame_error\n");
+			mbuf = NULL;
+			goto rx_processing_done;
+		}
+
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(!new_mbuf)) {
+			stats->ierrors++;
+			break;
+		}
+		/* Process the incoming frame. */
+		pkt_len = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_datlen));
+
+		/* Look up the mbuf posted for this descriptor */
+		index_r = enet_get_bd_index(rx_bdp, &rxq->bd);
+		mbuf = rxq->rx_mbuf[index_r];
+
+		/* Set the data length; the HW count includes the 4-byte FCS */
+		rte_pktmbuf_append(mbuf, pkt_len - 4);
+		if (rxq->fep->quirks & QUIRK_RACC)
+			rte_pktmbuf_adj(mbuf, 2);
+
+		/* Replace Buffer in BD */
+		rxq->rx_mbuf[index_r] = new_mbuf;
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+				&rx_bdp->bd_bufaddr);
+
+rx_processing_done:
+		/* Rx processing is done; clear the status flags
+		 * for this buffer
+		 */
+		status &= ~RX_BD_STATS;
+
+		/* Mark the buffer empty */
+		status |= RX_BD_EMPTY;
+
+		/* Make sure the updates to the rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &rx_bdp->bd_sc);
+
+		/* Update BD pointer to next entry */
+		rx_bdp = enet_get_nextdesc(rx_bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.
+		 */
+		rte_write32(0, rxq->bd.active_reg_desc);
+
+		/* TX begins: First clean the ring then process packet */
+		index_t = enet_get_bd_index(tx_bdp, &txq->bd);
+		status = rte_le_to_cpu_16(rte_read16(&tx_bdp->bd_sc));
+		if (status & TX_BD_READY)
+			stats->oerrors++;
+			break;
+		if (txq->tx_mbuf[index_t]) {
+			rte_pktmbuf_free(txq->tx_mbuf[index_t]);
+			txq->tx_mbuf[index_t] = NULL;
+		}
+
+		if (mbuf == NULL)
+			continue;
+
+		/* Fill in a Tx ring entry */
+		status &= ~TX_BD_STATS;
+
+		/* Set buffer length and buffer pointer */
+		pkt_len = rte_pktmbuf_pkt_len(mbuf);
+		status |= (TX_BD_LAST);
+		data = rte_pktmbuf_mtod(mbuf, void *);
+
+		for (i = 0; i <= pkt_len; i += RTE_CACHE_LINE_SIZE)
+			dcbf(data + i);
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			&tx_bdp->bd_bufaddr);
+		rte_write16(rte_cpu_to_le_16(pkt_len), &tx_bdp->bd_datlen);
+
+		/* Make sure the updates to the rest of the descriptor are performed
+		 * before transferring ownership.
+		 */
+		status |= (TX_BD_READY | TX_BD_TC);
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &tx_bdp->bd_sc);
+
+		/* Trigger transmission start */
+		rte_write32(0, txq->bd.active_reg_desc);
+
+		/* Save mbuf pointer to clean later */
+		txq->tx_mbuf[index_t] = mbuf;
+
+		/* If this was the last BD in the ring, start at the
+		 * beginning again.
+		 */
+		tx_bdp = enet_get_nextdesc(tx_bdp, &txq->bd);
+	}
+}
+#endif
+
+/* Rx queue processing: dequeue received packets from the Rx ring and, as
+ * each descriptor is processed, mark it empty again so the hardware can
+ * reuse it.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct rte_mempool *pool;
+	struct bufdesc *bdp;
+	struct rte_mbuf *mbuf, *new_mbuf = NULL;
+	unsigned short status;
+	unsigned short pkt_len;
+	int pkt_received = 0, index = 0;
+	void *data, *mbuf_data;
+	uint16_t vlan_tag;
+	struct  bufdesc_ex *ebdp = NULL;
+	bool    vlan_packet_rcvd = false;
+	struct enetfec_priv_rx_q *rxq  = (struct enetfec_priv_rx_q *)rxq1;
+	struct rte_eth_stats *stats = &rxq->fep->stats;
+	struct rte_eth_conf *eth_conf = &rxq->fep->dev->data->dev_conf;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	pool = rxq->pool;
+	bdp = rxq->bd.cur;
+#if ENETFEC_LOOPBACK
+	enetfec_lb_rxtx(rxq1);
+#endif
+	/* Process the incoming packet */
+	status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	while (!(status & RX_BD_EMPTY)) {
+		if (pkt_received >= nb_pkts)
+			break;
+
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(!new_mbuf)) {
+			stats->ierrors++;
+			break;
+		}
+		/* Check for errors. */
+		status ^= RX_BD_LAST;
+		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+			RX_BD_TR)) {
+			stats->ierrors++;
+			if (status & RX_BD_OV) {
+				/* FIFO overrun */
+				/* enet_dump_rx(rxq); */
+				ENET_PMD_ERR("rx_fifo_error\n");
+				goto rx_processing_done;
+			}
+			if (status & (RX_BD_LG | RX_BD_SH
+						| RX_BD_LAST)) {
+				/* Frame too long or too short. */
+				ENET_PMD_ERR("rx_length_error\n");
+				if (status & RX_BD_LAST)
+					ENET_PMD_ERR("rcv is not +last\n");
+			}
+			if (status & RX_BD_CR) {     /* CRC Error */
+				ENET_PMD_ERR("rx_crc_errors\n");
+			}
+			/* Report late collisions as a frame error. */
+			if (status & (RX_BD_NO | RX_BD_TR))
+				ENET_PMD_ERR("rx_frame_error\n");
+			goto rx_processing_done;
+		}
+
+		/* Process the incoming frame. */
+		stats->ipackets++;
+		pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+		stats->ibytes += pkt_len;
+
+		/* Look up the mbuf posted for this descriptor */
+		index = enet_get_bd_index(bdp, &rxq->bd);
+		mbuf = rxq->rx_mbuf[index];
+
+		data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+		mbuf_data = data;
+		rte_prefetch0(data);
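+		/* The HW byte count includes the 4-byte FCS; exclude it */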
+		rte_pktmbuf_append(mbuf, pkt_len - 4);
+
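+		/* The Rx accelerator (RACC) prepends 2 bytes of alignment
+		 * padding; strip it.
+		 */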
+		if (rxq->fep->quirks & QUIRK_RACC)
+			data = rte_pktmbuf_adj(mbuf, 2);
+
+		rx_pkts[pkt_received] = mbuf;
+		pkt_received++;
+
+		/* Extract the enhanced buffer descriptor */
+		ebdp = NULL;
+		if (rxq->fep->bufdesc_ex)
+			ebdp = (struct bufdesc_ex *)bdp;
+
+		/* If this is a VLAN packet remove the VLAN Tag */
+		vlan_packet_rcvd = false;
+		if ((rx_offloads & DEV_RX_OFFLOAD_VLAN) &&
+				rxq->fep->bufdesc_ex &&
+				(rte_read32(&ebdp->bd_esc) &
+				rte_cpu_to_le_32(BD_ENET_RX_VLAN))) {
+			/* Push and remove the vlan tag */
+			struct rte_vlan_hdr *vlan_header =
+				(struct rte_vlan_hdr *)(data + ETH_HLEN);
+			vlan_tag = rte_be_to_cpu_16(vlan_header->vlan_tci);
+
+			vlan_packet_rcvd = true;
+			memmove(mbuf_data + VLAN_HLEN, data, ETH_ALEN * 2);
+			rte_pktmbuf_adj(mbuf, VLAN_HLEN);
+		}
+
+		/* Get receive timestamp from the mbuf */
+		if (rxq->fep->hw_ts_rx_en && rxq->fep->bufdesc_ex)
+			mbuf->timestamp =
+				rte_le_to_cpu_32(rte_read32(&ebdp->ts));
+
+		if (rxq->fep->bufdesc_ex &&
+				(rxq->fep->flag_csum & RX_FLAG_CSUM_EN)) {
+			if (!(rte_read32(&ebdp->bd_esc) &
+					rte_cpu_to_le_32(RX_FLAG_CSUM_ERR))) {
+				/* HW reported no checksum error */
+				mbuf->ol_flags = PKT_RX_IP_CKSUM_GOOD;
+			} else {
+				mbuf->ol_flags = PKT_RX_IP_CKSUM_BAD;
+			}
+		}
+
+		/* Handle received VLAN packets */
+		if (vlan_packet_rcvd) {
+			mbuf->vlan_tci = vlan_tag;
+			mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		}
+
+		rxq->rx_mbuf[index] = new_mbuf;
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+				&bdp->bd_bufaddr);
+rx_processing_done:
+		/* Rx processing is done; clear the status flags
+		 * for this buffer
+		 */
+		status &= ~RX_BD_STATS;
+
+		/* Mark the buffer empty */
+		status |= RX_BD_EMPTY;
+
+		if (rxq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+				    &ebdp->bd_esc);
+			rte_write32(0, &ebdp->bd_prot);
+			rte_write32(0, &ebdp->bd_bdu);
+		}
+
+		/* Make sure the updates to the rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Update BD pointer to next entry */
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.
+		 */
+		rte_write32(0, rxq->bd.active_reg_desc);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	}
+	rxq->bd.cur = bdp;
+	return pkt_received;
+}
+
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct enetfec_priv_tx_q *txq  =
+			(struct enetfec_priv_tx_q *)tx_queue;
+	struct rte_eth_stats *stats = &txq->fep->stats;
+	struct bufdesc *bdp, *last_bdp;
+	struct rte_mbuf *mbuf;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned int index, estatus = 0;
+	unsigned int i, pkt_transmitted = 0;
+	u8 *data;
+	int tx_st = 1;
+
+	while (tx_st) {
+		if (pkt_transmitted >= nb_pkts) {
+			tx_st = 0;
+			break;
+		}
+		bdp = txq->bd.cur;
+		/* First clean the ring */
+		index = enet_get_bd_index(bdp, &txq->bd);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+		if (status & TX_BD_READY) {
+			stats->oerrors++;
+			break;
+		}
+		if (txq->tx_mbuf[index]) {
+			rte_pktmbuf_free(txq->tx_mbuf[index]);
+			txq->tx_mbuf[index] = NULL;
+		}
+
+		mbuf = *(tx_pkts);
+		tx_pkts++;
+
+		/* Fill in a Tx ring entry */
+		last_bdp = bdp;
+		status &= ~TX_BD_STATS;
+
+		/* Set buffer length and buffer pointer */
+		buflen = rte_pktmbuf_pkt_len(mbuf);
+
+		/* Scatter-gather is not supported; stop at the first
+		 * multi-segment mbuf and report what was sent so far.
+		 */
+		if (mbuf->nb_segs > 1) {
+			ENET_PMD_DEBUG("SG not supported");
+			return pkt_transmitted;
+		}
+		stats->opackets++;
+		stats->obytes += buflen;
+
+		status |= (TX_BD_LAST);
+		data = rte_pktmbuf_mtod(mbuf, void *);
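+		/* Flush the frame data from the CPU cache so the DMA engine
+		 * reads up-to-date bytes.
+		 */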
+		for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+			dcbf(data + i);
+
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			    &bdp->bd_bufaddr);
+		rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+		if (txq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+			if (mbuf->ol_flags & PKT_RX_IP_CKSUM_GOOD)
+				estatus |= TX_BD_PINS | TX_BD_IINS;
+
+			rte_write32(0, &ebdp->bd_bdu);
+			rte_write32(rte_cpu_to_le_32(estatus),
+				    &ebdp->bd_esc);
+		}
+
+		index = enet_get_bd_index(last_bdp, &txq->bd);
+		/* Save mbuf pointer */
+		txq->tx_mbuf[index] = mbuf;
+
+		/* Make sure the updates to the rest of the descriptor are performed
+		 * before transferring ownership.
+		 */
+		status |= (TX_BD_READY | TX_BD_TC);
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Trigger transmission start */
+		rte_write32(0, txq->bd.active_reg_desc);
+		pkt_transmitted++;
+
+		/* If this was the last BD in the ring, start at the
+		 * beginning again.
+		 */
+		bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+		/* Make sure the updates to bdp and tx_mbuf are performed
+		 * before txq->bd.cur is updated.
+		 */
+		txq->bd.cur = bdp;
+	}
+	return pkt_transmitted;
+}
-- 
2.17.1



Thread overview: 17+ messages
2021-04-30  4:34 [dpdk-dev] [PATCH 0/4] drivers/net: add NXP ENETFEC driver Apeksha Gupta
2021-04-30  4:34 ` [dpdk-dev] [PATCH 1/4] drivers/net/enetfec: Introduce " Apeksha Gupta
2021-06-08 13:10   ` Andrew Rybchenko
2021-07-02 13:55     ` David Marchand
2021-07-04  2:57   ` Sachin Saxena (OSS)
2021-04-30  4:34 ` [dpdk-dev] [PATCH 2/4] drivers/net/enetfec: UIO support added Apeksha Gupta
2021-06-08 13:21   ` Andrew Rybchenko
2021-07-04  4:27   ` Sachin Saxena (OSS)
2021-04-30  4:34 ` [dpdk-dev] [PATCH 3/4] drivers/net/enetfec: queue configuration Apeksha Gupta
2021-06-08 13:38   ` Andrew Rybchenko
2021-07-04  6:46   ` Sachin Saxena (OSS)
2021-04-30  4:34 ` Apeksha Gupta [this message]
2021-06-08 13:42   ` [dpdk-dev] [PATCH 4/4] drivers/net/enetfec: add enqueue and dequeue support Andrew Rybchenko
2021-06-21  9:14     ` [dpdk-dev] [EXT] " Apeksha Gupta
2021-07-05  8:48   ` [dpdk-dev] " Sachin Saxena (OSS)
2021-05-04 15:40 ` [dpdk-dev] [PATCH 0/4] drivers/net: add NXP ENETFEC driver Ferruh Yigit
2021-07-04  2:55 ` Sachin Saxena (OSS)
