From: Apeksha Gupta <apeksha.gupta@nxp.com>
To: david.marchand@redhat.com, andrew.rybchenko@oktetlabs.ru,
ferruh.yigit@intel.com
Cc: dev@dpdk.org, sachin.saxena@nxp.com, hemant.agrawal@nxp.com,
Apeksha Gupta <apeksha.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH v5 4/5] net/enetfec: add enqueue and dequeue support
Date: Wed, 20 Oct 2021 00:10:02 +0530
Message-ID: <20211019184003.23128-5-apeksha.gupta@nxp.com>
In-Reply-To: <20211019184003.23128-1-apeksha.gupta@nxp.com>
This patch adds burst enqueue and dequeue operations to the enetfec
PMD. Loopback mode is also added; the compile-time flag 'ENETFEC_LOOPBACK'
enables this feature and it is disabled by default. Basic features such as
promiscuous mode enable and basic statistics are added as well.
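
For reference, the fast path hooks into the standard ethdev burst API
(rte_eth_rx_burst/rte_eth_tx_burst); loopback is enabled by setting the
ENETFEC_LOOPBACK macro to 1 in enet_rxtx.c at build time. A minimal,
illustrative poll loop against this PMD could look like the sketch below
(port_id, queue 0 and the mbuf pool are assumed to be configured and
started elsewhere, e.g. with the enetfec vdev):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SZ 32

    /* Illustrative only: Rx/Tx bursts end up in enetfec_recv_pkts and
     * enetfec_xmit_pkts once the port has been started.
     */
    static void
    poll_port(uint16_t port_id)
    {
            struct rte_mbuf *pkts[BURST_SZ];
            uint16_t nb_rx, nb_tx;

            for (;;) {
                    nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SZ);
                    if (nb_rx == 0)
                            continue;
                    /* echo the packets back on Tx queue 0 */
                    nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
                    /* free whatever the Tx ring could not accept */
                    while (nb_tx < nb_rx)
                            rte_pktmbuf_free(pkts[nb_tx++]);
            }
    }
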
Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
doc/guides/nics/enetfec.rst | 2 +
doc/guides/nics/features/enetfec.ini | 2 +
drivers/net/enetfec/enet_ethdev.c | 197 ++++++++++++
drivers/net/enetfec/enet_rxtx.c | 445 +++++++++++++++++++++++++++
4 files changed, 646 insertions(+)
create mode 100644 drivers/net/enetfec/enet_rxtx.c
diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst
index 36b6e34c6a..6c4e23379f 100644
--- a/doc/guides/nics/enetfec.rst
+++ b/doc/guides/nics/enetfec.rst
@@ -80,6 +80,8 @@ driver.
ENETFEC Features
~~~~~~~~~~~~~~~~~
+- Basic stats
+- Promiscuous
- Linux
- ARMv8
diff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini
index bdfbdbd9d4..7e0fb148ac 100644
--- a/doc/guides/nics/features/enetfec.ini
+++ b/doc/guides/nics/features/enetfec.ini
@@ -4,6 +4,8 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Basic stats = Y
+Promiscuous mode = Y
Linux = Y
ARMv8 = Y
Usage doc = Y
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index 0ff93363c7..4419952443 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -41,6 +41,8 @@
#define ENETFEC_RAFL_V 0x8
#define ENETFEC_OPD_V 0xFFF0
+/* Extended buffer descriptor */
+#define ENETFEC_EXTENDED_BD 0
#define NUM_OF_QUEUES 6
uint32_t e_cntl;
@@ -179,6 +181,40 @@ enetfec_restart(struct rte_eth_dev *dev)
rte_delay_us(10);
}
+static void
+enet_free_buffers(struct rte_eth_dev *dev)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ unsigned int i, q;
+ struct rte_mbuf *mbuf;
+ struct bufdesc *bdp;
+ struct enetfec_priv_rx_q *rxq;
+ struct enetfec_priv_tx_q *txq;
+
+ for (q = 0; q < dev->data->nb_rx_queues; q++) {
+ rxq = fep->rx_queues[q];
+ bdp = rxq->bd.base;
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ mbuf = rxq->rx_mbuf[i];
+ rxq->rx_mbuf[i] = NULL;
+ if (mbuf)
+ rte_pktmbuf_free(mbuf);
+ bdp = enet_get_nextdesc(bdp, &rxq->bd);
+ }
+ }
+
+ for (q = 0; q < dev->data->nb_tx_queues; q++) {
+ txq = fep->tx_queues[q];
+ bdp = txq->bd.base;
+ for (i = 0; i < txq->bd.ring_size; i++) {
+ mbuf = txq->tx_mbuf[i];
+ txq->tx_mbuf[i] = NULL;
+ if (mbuf)
+ rte_pktmbuf_free(mbuf);
+ }
+ }
+}
+
static int
enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
{
@@ -192,6 +228,8 @@ static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
enetfec_restart(dev);
+ dev->rx_pkt_burst = &enetfec_recv_pkts;
+ dev->tx_pkt_burst = &enetfec_xmit_pkts;
return 0;
}
@@ -226,6 +264,110 @@ enetfec_eth_stop(__rte_unused struct rte_eth_dev *dev)
return 0;
}
+static int
+enetfec_eth_close(struct rte_eth_dev *dev)
+{
+ enet_free_buffers(dev);
+ return 0;
+}
+
+static int
+enetfec_eth_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct rte_eth_link link;
+ unsigned int lstatus = 1;
+
+ if (dev == NULL) {
+ ENETFEC_PMD_ERR("Invalid device in link_update.\n");
+ return 0;
+ }
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+
+ link.link_status = lstatus;
+ link.link_speed = ETH_SPEED_NUM_1G;
+
+ ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+ "Up");
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+enetfec_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ uint32_t tmp;
+
+ tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+ tmp |= 0x8; /* set PROM to enable promiscuous mode */
+ tmp &= ~0x2; /* clear DRT (disable receive on transmit) */
+ rte_write32(rte_cpu_to_le_32(tmp),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+
+ return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+
+ rte_write32(rte_cpu_to_le_32(0xffffffff),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+ rte_write32(rte_cpu_to_le_32(0xffffffff),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+ dev->data->all_multicast = 1;
+
+ rte_write32(rte_cpu_to_le_32(0x04400002),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+ rte_write32(rte_cpu_to_le_32(0x10800049),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+
+ return 0;
+}
+
+/* Set a MAC change in hardware. */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+
+ writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+ (addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
+ writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
+
+ rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+ return 0;
+}
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ struct rte_eth_stats *eth_stats = &fep->stats;
+
+ if (stats == NULL)
+ return -1;
+
+ memset(stats, 0, sizeof(struct rte_eth_stats));
+
+ stats->ipackets = eth_stats->ipackets;
+ stats->ibytes = eth_stats->ibytes;
+ stats->ierrors = eth_stats->ierrors;
+ stats->opackets = eth_stats->opackets;
+ stats->obytes = eth_stats->obytes;
+ stats->oerrors = eth_stats->oerrors;
+
+ return 0;
+}
+
static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
@@ -237,6 +379,18 @@ enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
return 0;
}
+static void
+enet_free_queue(struct rte_eth_dev *dev)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rte_free(fep->rx_queues[i]);
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_free(fep->tx_queues[i]);
+}
+
static const unsigned short offset_des_active_rxq[] = {
ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};
@@ -442,6 +596,12 @@ static const struct eth_dev_ops enetfec_ops = {
.dev_configure = enetfec_eth_configure,
.dev_start = enetfec_eth_start,
.dev_stop = enetfec_eth_stop,
+ .dev_close = enetfec_eth_close,
+ .link_update = enetfec_eth_link_update,
+ .promiscuous_enable = enetfec_promiscuous_enable,
+ .allmulticast_enable = enetfec_multicast_enable,
+ .mac_addr_set = enetfec_set_mac_address,
+ .stats_get = enetfec_stats_get,
.dev_infos_get = enetfec_eth_info,
.rx_queue_setup = enetfec_rx_queue_setup,
.tx_queue_setup = enetfec_tx_queue_setup
@@ -468,6 +628,7 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
int rc;
int i;
unsigned int bdsize;
+ struct rte_ether_addr macaddr;
name = rte_vdev_device_name(vdev);
if (name == NULL)
@@ -510,6 +671,27 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
fep->bd_addr_p = fep->bd_addr_p + bdsize;
}
+ /* Copy the station address into the dev structure. */
+ dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+ if (dev->data->mac_addrs == NULL) {
+ ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+ ETHER_ADDR_LEN);
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Set default mac address
+ */
+ macaddr.addr_bytes[0] = 1;
+ macaddr.addr_bytes[1] = 1;
+ macaddr.addr_bytes[2] = 1;
+ macaddr.addr_bytes[3] = 1;
+ macaddr.addr_bytes[4] = 1;
+ macaddr.addr_bytes[5] = 1;
+ enetfec_set_mac_address(dev, &macaddr);
+
+ fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
rc = enetfec_eth_init(dev);
if (rc)
goto failed_init;
@@ -518,6 +700,8 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
failed_init:
ENETFEC_PMD_ERR("Failed to init");
+err:
+ rte_eth_dev_release_port(dev);
return rc;
}
@@ -525,6 +709,8 @@ static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
struct rte_eth_dev *eth_dev = NULL;
+ struct enetfec_private *fep;
+ struct enetfec_priv_rx_q *rxq;
int ret;
/* find the ethdev entry */
@@ -532,11 +718,22 @@ pmd_enetfec_remove(struct rte_vdev_device *vdev)
if (eth_dev == NULL)
return -ENODEV;
+ fep = eth_dev->data->dev_private;
+ /* Free descriptor base of first RX queue as it was configured
+ * first in enetfec_eth_init().
+ */
+ rxq = fep->rx_queues[0];
+ rte_free(rxq->bd.base);
+ enet_free_queue(eth_dev);
+ enetfec_eth_stop(eth_dev);
+
ret = rte_eth_dev_release_port(eth_dev);
if (ret != 0)
return -EINVAL;
ENETFEC_PMD_INFO("Closing sw device");
+ munmap(fep->hw_baseaddr_v, fep->cbus_size);
+
return 0;
}
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644
index 0000000000..445fa97e77
--- /dev/null
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <signal.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
+#define ENETFEC_LOOPBACK 0
+#define ENETFEC_DUMP 0
+
+#if ENETFEC_DUMP
+static void
+enet_dump(struct enetfec_priv_tx_q *txq)
+{
+ struct bufdesc *bdp;
+ int index = 0;
+
+ ENETFEC_PMD_DEBUG("TX ring dump\n");
+ ENETFEC_PMD_DEBUG("Nr SC addr len MBUF\n");
+
+ bdp = txq->bd.base;
+ do {
+ ENETFEC_PMD_DEBUG("%3u %c%c 0x%04x 0x%08x %4u %p\n",
+ index,
+ bdp == txq->bd.cur ? 'S' : ' ',
+ bdp == txq->dirty_tx ? 'H' : ' ',
+ rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)),
+ rte_le_to_cpu_32(rte_read32(&bdp->bd_bufaddr)),
+ rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)),
+ txq->tx_mbuf[index]);
+ bdp = enet_get_nextdesc(bdp, &txq->bd);
+ index++;
+ } while (bdp != txq->bd.base);
+}
+
+static void
+enet_dump_rx(struct enetfec_priv_rx_q *rxq)
+{
+ struct bufdesc *bdp;
+ int index = 0;
+
+ ENETFEC_PMD_DEBUG("RX ring dump\n");
+ ENETFEC_PMD_DEBUG("Nr SC addr len MBUF\n");
+
+ bdp = rxq->bd.base;
+ do {
+ ENETFEC_PMD_DEBUG("%3u %c 0x%04x 0x%08x %4u %p\n",
+ index,
+ bdp == rxq->bd.cur ? 'S' : ' ',
+ rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)),
+ rte_le_to_cpu_32(rte_read32(&bdp->bd_bufaddr)),
+ rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)),
+ rxq->rx_mbuf[index]);
+ rte_pktmbuf_dump(stdout, rxq->rx_mbuf[index],
+ rxq->rx_mbuf[index]->pkt_len);
+ bdp = enet_get_nextdesc(bdp, &rxq->bd);
+ index++;
+ } while (bdp != rxq->bd.base);
+}
+#endif
+
+#if ENETFEC_LOOPBACK
+static volatile bool lb_quit;
+
+static void fec_signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {
+ printf("\n\n %s: Signal %d received, preparing to exit...\n",
+ __func__, signum);
+ lb_quit = true;
+ }
+}
+
+static void
+enetfec_lb_rxtx(void *rxq1)
+{
+ struct rte_mempool *pool;
+ struct bufdesc *rx_bdp = NULL, *tx_bdp = NULL;
+ struct rte_mbuf *mbuf = NULL, *new_mbuf = NULL;
+ unsigned short status;
+ unsigned short pkt_len = 0;
+ int index_r = 0, index_t = 0;
+ u8 *data;
+ struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
+ struct rte_eth_stats *stats = &rxq->fep->stats;
+ unsigned int i;
+ struct enetfec_private *fep;
+ struct enetfec_priv_tx_q *txq;
+ fep = rxq->fep->dev->data->dev_private;
+ txq = fep->tx_queues[0];
+
+ pool = rxq->pool;
+ rx_bdp = rxq->bd.cur;
+ tx_bdp = txq->bd.cur;
+
+ signal(SIGTSTP, fec_signal_handler);
+ while (!lb_quit) {
+chk_again:
+ status = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_sc));
+ if (status & RX_BD_EMPTY) {
+ if (!lb_quit)
+ goto chk_again;
+ rxq->bd.cur = rx_bdp;
+ txq->bd.cur = tx_bdp;
+ return;
+ }
+
+ /* Check for errors. */
+ status ^= RX_BD_LAST;
+ if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+ RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+ RX_BD_TR)) {
+ stats->ierrors++;
+ if (status & RX_BD_OV) {
+ /* FIFO overrun */
+ ENETFEC_PMD_ERR("rx_fifo_error\n");
+ goto rx_processing_done;
+ }
+ if (status & (RX_BD_LG | RX_BD_SH
+ | RX_BD_LAST)) {
+ /* Frame too long or too short. */
+ ENETFEC_PMD_ERR("rx_length_error\n");
+ if (status & RX_BD_LAST)
+ ENETFEC_PMD_ERR("rcv is not +last\n");
+ }
+ /* CRC Error */
+ if (status & RX_BD_CR)
+ ENETFEC_PMD_ERR("rx_crc_errors\n");
+
+ /* Report late collisions as a frame error. */
+ if (status & (RX_BD_NO | RX_BD_TR))
+ ENETFEC_PMD_ERR("rx_frame_error\n");
+ mbuf = NULL;
+ goto rx_processing_done;
+ }
+
+ new_mbuf = rte_pktmbuf_alloc(pool);
+ if (unlikely(!new_mbuf)) {
+ stats->ierrors++;
+ break;
+ }
+ /* Process the incoming frame. */
+ pkt_len = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_datlen));
+
+ /* shows data with respect to the data_off field. */
+ index_r = enet_get_bd_index(rx_bdp, &rxq->bd);
+ mbuf = rxq->rx_mbuf[index_r];
+
+ /* adjust pkt_len */
+ rte_pktmbuf_append((struct rte_mbuf *)mbuf, pkt_len - 4);
+ if (rxq->fep->quirks & QUIRK_RACC)
+ rte_pktmbuf_adj(mbuf, 2);
+
+ /* Replace Buffer in BD */
+ rxq->rx_mbuf[index_r] = new_mbuf;
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+ &rx_bdp->bd_bufaddr);
+
+rx_processing_done:
+ /* when rx_processing_done clear the status flags
+ * for this buffer
+ */
+ status &= ~RX_BD_STATS;
+
+ /* Mark the buffer empty */
+ status |= RX_BD_EMPTY;
+
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ rte_wmb();
+ rte_write16(rte_cpu_to_le_16(status), &rx_bdp->bd_sc);
+
+ /* Update BD pointer to next entry */
+ rx_bdp = enet_get_nextdesc(rx_bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames.
+ */
+ rte_write32(0, rxq->bd.active_reg_desc);
+
+ /* TX begins: First clean the ring then process packet */
+ index_t = enet_get_bd_index(tx_bdp, &txq->bd);
+ status = rte_le_to_cpu_16(rte_read16(&tx_bdp->bd_sc));
+ if (status & TX_BD_READY) {
+ stats->oerrors++;
+ break;
+ }
+ if (txq->tx_mbuf[index_t]) {
+ rte_pktmbuf_free(txq->tx_mbuf[index_t]);
+ txq->tx_mbuf[index_t] = NULL;
+ }
+
+ if (mbuf == NULL)
+ continue;
+
+ /* Fill in a Tx ring entry */
+ status &= ~TX_BD_STATS;
+
+ /* Set buffer length and buffer pointer */
+ pkt_len = rte_pktmbuf_pkt_len(mbuf);
+ status |= (TX_BD_LAST);
+ data = rte_pktmbuf_mtod(mbuf, void *);
+
+ for (i = 0; i <= pkt_len; i += RTE_CACHE_LINE_SIZE)
+ dcbf(data + i);
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+ &tx_bdp->bd_bufaddr);
+ rte_write16(rte_cpu_to_le_16(pkt_len), &tx_bdp->bd_datlen);
+
+ /* Make sure the updates to rest of the descriptor are performed
+ * before transferring ownership.
+ */
+ status |= (TX_BD_READY | TX_BD_TC);
+ rte_wmb();
+ rte_write16(rte_cpu_to_le_16(status), &tx_bdp->bd_sc);
+
+ /* Trigger transmission start */
+ rte_write32(0, txq->bd.active_reg_desc);
+
+ /* Save mbuf pointer to clean later */
+ txq->tx_mbuf[index_t] = mbuf;
+
+ /* If this was the last BD in the ring, start at the
+ * beginning again.
+ */
+ tx_bdp = enet_get_nextdesc(tx_bdp, &txq->bd);
+ }
+}
+#endif
+
+/* This function performs Rx queue processing: dequeue packets from the Rx
+ * queue and, while walking the ring, mark processed descriptors as empty.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mempool *pool;
+ struct bufdesc *bdp;
+ struct rte_mbuf *mbuf, *new_mbuf = NULL;
+ unsigned short status;
+ unsigned short pkt_len;
+ int pkt_received = 0, index = 0;
+ void *data;
+ struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
+ struct rte_eth_stats *stats = &rxq->fep->stats;
+ pool = rxq->pool;
+ bdp = rxq->bd.cur;
+#if ENETFEC_LOOPBACK
+ enetfec_lb_rxtx(rxq1);
+#endif
+ /* Process the incoming packet */
+ status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+ while ((status & RX_BD_EMPTY) == 0) {
+ if (pkt_received >= nb_pkts)
+ break;
+
+ new_mbuf = rte_pktmbuf_alloc(pool);
+ if (unlikely(new_mbuf == NULL)) {
+ stats->ierrors++;
+ break;
+ }
+ /* Check for errors. */
+ status ^= RX_BD_LAST;
+ if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+ RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+ RX_BD_TR)) {
+ stats->ierrors++;
+ if (status & RX_BD_OV) {
+ /* FIFO overrun */
+ /* enet_dump_rx(rxq); */
+ ENETFEC_PMD_ERR("rx_fifo_error\n");
+ goto rx_processing_done;
+ }
+ if (status & (RX_BD_LG | RX_BD_SH
+ | RX_BD_LAST)) {
+ /* Frame too long or too short. */
+ ENETFEC_PMD_ERR("rx_length_error\n");
+ if (status & RX_BD_LAST)
+ ENETFEC_PMD_ERR("rcv is not +last\n");
+ }
+ if (status & RX_BD_CR) { /* CRC Error */
+ ENETFEC_PMD_ERR("rx_crc_errors\n");
+ }
+ /* Report late collisions as a frame error. */
+ if (status & (RX_BD_NO | RX_BD_TR))
+ ENETFEC_PMD_ERR("rx_frame_error\n");
+ goto rx_processing_done;
+ }
+
+ /* Process the incoming frame. */
+ stats->ipackets++;
+ pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+ stats->ibytes += pkt_len;
+
+ /* shows data with respect to the data_off field. */
+ index = enet_get_bd_index(bdp, &rxq->bd);
+ mbuf = rxq->rx_mbuf[index];
+
+ data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+ rte_prefetch0(data);
+ rte_pktmbuf_append((struct rte_mbuf *)mbuf,
+ pkt_len - 4);
+
+ if (rxq->fep->quirks & QUIRK_RACC)
+ data = rte_pktmbuf_adj(mbuf, 2);
+
+ rx_pkts[pkt_received] = mbuf;
+ pkt_received++;
+ rxq->rx_mbuf[index] = new_mbuf;
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+ &bdp->bd_bufaddr);
+rx_processing_done:
+ /* when rx_processing_done clear the status flags
+ * for this buffer
+ */
+ status &= ~RX_BD_STATS;
+
+ /* Mark the buffer empty */
+ status |= RX_BD_EMPTY;
+
+ if (rxq->fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+ &ebdp->bd_esc);
+ rte_write32(0, &ebdp->bd_prot);
+ rte_write32(0, &ebdp->bd_bdu);
+ }
+
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ rte_wmb();
+ rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+ /* Update BD pointer to next entry */
+ bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames.
+ */
+ rte_write32(0, rxq->bd.active_reg_desc);
+ status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+ }
+ rxq->bd.cur = bdp;
+ return pkt_received;
+}
+
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct enetfec_priv_tx_q *txq =
+ (struct enetfec_priv_tx_q *)tx_queue;
+ struct rte_eth_stats *stats = &txq->fep->stats;
+ struct bufdesc *bdp, *last_bdp;
+ struct rte_mbuf *mbuf;
+ unsigned short status;
+ unsigned short buflen;
+ unsigned int index, estatus = 0;
+ unsigned int i, pkt_transmitted = 0;
+ u8 *data;
+ int tx_st = 1;
+
+ while (tx_st) {
+ if (pkt_transmitted >= nb_pkts) {
+ tx_st = 0;
+ break;
+ }
+ bdp = txq->bd.cur;
+ /* First clean the ring */
+ index = enet_get_bd_index(bdp, &txq->bd);
+ status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+ if (status & TX_BD_READY) {
+ stats->oerrors++;
+ break;
+ }
+ if (txq->tx_mbuf[index]) {
+ rte_pktmbuf_free(txq->tx_mbuf[index]);
+ txq->tx_mbuf[index] = NULL;
+ }
+
+ mbuf = *(tx_pkts);
+ tx_pkts++;
+
+ /* Fill in a Tx ring entry */
+ last_bdp = bdp;
+ status &= ~TX_BD_STATS;
+
+ /* Set buffer length and buffer pointer */
+ buflen = rte_pktmbuf_pkt_len(mbuf);
+
+ if (mbuf->nb_segs > 1) {
+ ENETFEC_PMD_DEBUG("SG not supported");
+ return pkt_transmitted;
+ }
+ stats->opackets++;
+ stats->obytes += buflen;
+ status |= (TX_BD_LAST);
+ data = rte_pktmbuf_mtod(mbuf, void *);
+ for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+ dcbf(data + i);
+
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+ &bdp->bd_bufaddr);
+ rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+ if (txq->fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ rte_write32(0, &ebdp->bd_bdu);
+ rte_write32(rte_cpu_to_le_32(estatus),
+ &ebdp->bd_esc);
+ }
+
+ index = enet_get_bd_index(last_bdp, &txq->bd);
+ /* Save mbuf pointer */
+ txq->tx_mbuf[index] = mbuf;
+
+ /* Make sure the updates to rest of the descriptor are performed
+ * before transferring ownership.
+ */
+ status |= (TX_BD_READY | TX_BD_TC);
+ rte_wmb();
+ rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+ /* Trigger transmission start */
+ rte_write32(0, txq->bd.active_reg_desc);
+ pkt_transmitted++;
+
+ /* If this was the last BD in the ring, start at the
+ * beginning again.
+ */
+ bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+ /* Make sure the updates to bdp and tx_mbuf are performed
+ * before txq->bd.cur is updated.
+ */
+ txq->bd.cur = bdp;
+ }
+ return pkt_transmitted;
+}
--
2.17.1