From: Apeksha Gupta <apeksha.gupta@nxp.com>
To: david.marchand@redhat.com, ferruh.yigit@intel.com,
	andrew.rybchenko@oktetlabs.ru
Cc: dev@dpdk.org, sachin.saxena@nxp.com, hemant.agrawal@nxp.com,
	Apeksha Gupta <apeksha.gupta@nxp.com>
Date: Tue, 9 Nov 2021 17:04:31 +0530
Message-Id: <20211109113432.11876-5-apeksha.gupta@nxp.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20211109113432.11876-1-apeksha.gupta@nxp.com>
References: <20211103192045.22240-2-apeksha.gupta@nxp.com>
	<20211109113432.11876-1-apeksha.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH v8 4/5] net/enetfec: add Rx/Tx support

This patch adds the burst enqueue and dequeue operations to the enetfec
PMD. Loopback mode is also added; the compile-time flag
'ENETFEC_LOOPBACK' enables it, and loopback is disabled by default.
Basic features such as promiscuous mode and basic statistics are added
as well.

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
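For context on the call path: enetfec_eth_start() installs these functions
as dev->rx_pkt_burst and dev->tx_pkt_burst, so applications reach them
through the generic ethdev burst API. Below is a minimal, hypothetical
consumer loop -- not part of this patch -- assuming port 0/queue 0 are
already configured and started (forward_loop and BURST_SIZE are
illustrative names, not driver symbols):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Receive a burst on port 0/queue 0 and transmit it back out of the
 * same port; rte_eth_rx_burst()/rte_eth_tx_burst() dispatch to
 * enetfec_recv_pkts()/enetfec_xmit_pkts() once the device is started.
 */
static void
forward_loop(void)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, nb_tx, i;

	for (;;) {
		nb_rx = rte_eth_rx_burst(0, 0, pkts, BURST_SIZE);
		if (nb_rx == 0)
			continue;
		nb_tx = rte_eth_tx_burst(0, 0, pkts, nb_rx);
		/* Free any packets the Tx ring could not accept. */
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}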
 doc/guides/nics/enetfec.rst          |   2 +
 doc/guides/nics/features/enetfec.ini |   2 +
 drivers/net/enetfec/enet_ethdev.c    | 183 ++++++++++++++++++++++
 drivers/net/enetfec/enet_ethdev.h    |  23 +++
 drivers/net/enetfec/enet_rxtx.c      | 220 +++++++++++++++++++++++++++
 drivers/net/enetfec/meson.build      |   3 +-
 6 files changed, 432 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/enetfec/enet_rxtx.c

diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst
index f0460c3ea7..34af51c461 100644
--- a/doc/guides/nics/enetfec.rst
+++ b/doc/guides/nics/enetfec.rst
@@ -84,6 +84,8 @@ driver.
 ENETFEC Features
 ~~~~~~~~~~~~~~~~~
 
+- Basic stats
+- Promiscuous
 - Linux
 - ARMv8
 
diff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini
index bdfbdbd9d4..3d8aa5b627 100644
--- a/doc/guides/nics/features/enetfec.ini
+++ b/doc/guides/nics/features/enetfec.ini
@@ -4,6 +4,8 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Promiscuous mode     = Y
+Basic stats          = Y
 Linux                = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index f70489ff91..8c8788ad8f 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -39,6 +39,8 @@
 #define ENETFEC_RAFL_V		0x8
 #define ENETFEC_OPD_V		0xFFF0
 
+/* Extended buffer descriptor */
+#define ENETFEC_EXTENDED_BD	0
 #define NUM_OF_BD_QUEUES	6
 
 /* Supported Rx offloads */
@@ -152,6 +154,40 @@ enetfec_restart(struct rte_eth_dev *dev)
 	rte_delay_us(10);
 }
 
+static void
+enet_free_buffers(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i, q;
+	struct rte_mbuf *mbuf;
+	struct bufdesc *bdp;
+	struct enetfec_priv_rx_q *rxq;
+	struct enetfec_priv_tx_q *txq;
+
+	for (q = 0; q < dev->data->nb_rx_queues; q++) {
+		rxq = fep->rx_queues[q];
+		bdp = rxq->bd.base;
+		for (i = 0; i < rxq->bd.ring_size; i++) {
+			mbuf = rxq->rx_mbuf[i];
+			rxq->rx_mbuf[i] = NULL;
+			if (mbuf)
+				rte_pktmbuf_free(mbuf);
+			bdp = enet_get_nextdesc(bdp, &rxq->bd);
+		}
+	}
+
+	for (q = 0; q < dev->data->nb_tx_queues; q++) {
+		txq = fep->tx_queues[q];
+		bdp = txq->bd.base;
+		for (i = 0; i < txq->bd.ring_size; i++) {
+			mbuf = txq->tx_mbuf[i];
+			txq->tx_mbuf[i] = NULL;
+			if (mbuf)
+				rte_pktmbuf_free(mbuf);
+		}
+	}
+}
+
 static int
 enetfec_eth_configure(struct rte_eth_dev *dev)
 {
@@ -165,6 +201,8 @@ static int
 enetfec_eth_start(struct rte_eth_dev *dev)
 {
 	enetfec_restart(dev);
+	dev->rx_pkt_burst = &enetfec_recv_pkts;
+	dev->tx_pkt_burst = &enetfec_xmit_pkts;
 
 	return 0;
 }
@@ -191,6 +229,100 @@ enetfec_eth_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+enetfec_eth_close(struct rte_eth_dev *dev)
+{
+	enet_free_buffers(dev);
+	return 0;
+}
+
+static int
+enetfec_eth_link_update(struct rte_eth_dev *dev,
+		int wait_to_complete __rte_unused)
+{
+	struct rte_eth_link link;
+	unsigned int lstatus = 1;
+
+	memset(&link, 0, sizeof(struct rte_eth_link));
+
+	link.link_status = lstatus;
+	link.link_speed = ETH_SPEED_NUM_1G;
+
+	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+			"Up");
+
+	return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+enetfec_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	uint32_t tmp;
+
+	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+	tmp |= 0x8;
+	tmp &= ~0x2;
+	rte_write32(rte_cpu_to_le_32(tmp),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+
+	return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
+	rte_write32(rte_cpu_to_le_32(0xffffffff),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+	rte_write32(rte_cpu_to_le_32(0xffffffff),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+	dev->data->all_multicast = 1;
+
+	rte_write32(rte_cpu_to_le_32(0x04400002),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+	rte_write32(rte_cpu_to_le_32(0x10800049),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+
+	return 0;
+}
+
+/* Set a MAC change in hardware.
+ */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+		struct rte_ether_addr *addr)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+
+	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
+	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
+
+	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+	return 0;
+}
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+		struct rte_eth_stats *stats)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	struct rte_eth_stats *eth_stats = &fep->stats;
+
+	stats->ipackets = eth_stats->ipackets;
+	stats->ibytes = eth_stats->ibytes;
+	stats->ierrors = eth_stats->ierrors;
+	stats->opackets = eth_stats->opackets;
+	stats->obytes = eth_stats->obytes;
+	stats->oerrors = eth_stats->oerrors;
+
+	return 0;
+}
+
 static int
 enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
 	struct rte_eth_dev_info *dev_info)
@@ -202,6 +334,18 @@ enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
+static void
+enet_free_queue(struct rte_eth_dev *dev)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		rte_free(fep->rx_queues[i]);
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		rte_free(fep->tx_queues[i]);
+}
+
 static const unsigned short offset_des_active_rxq[] = {
 	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
 };
@@ -407,6 +551,12 @@ static const struct eth_dev_ops enetfec_ops = {
 	.dev_configure          = enetfec_eth_configure,
 	.dev_start              = enetfec_eth_start,
 	.dev_stop               = enetfec_eth_stop,
+	.dev_close              = enetfec_eth_close,
+	.link_update            = enetfec_eth_link_update,
+	.promiscuous_enable     = enetfec_promiscuous_enable,
+	.allmulticast_enable    = enetfec_multicast_enable,
+	.mac_addr_set           = enetfec_set_mac_address,
+	.stats_get              = enetfec_stats_get,
 	.dev_infos_get          = enetfec_eth_info,
 	.rx_queue_setup         = enetfec_rx_queue_setup,
 	.tx_queue_setup         = enetfec_tx_queue_setup
@@ -432,6 +582,9 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 	int rc;
 	int i;
 	unsigned int bdsize;
+	struct rte_ether_addr macaddr = {
+		.addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
+	};
 
 	name = rte_vdev_device_name(vdev);
 	if (name == NULL)
@@ -474,6 +627,21 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 		fep->bd_addr_p = fep->bd_addr_p + bdsize;
 	}
 
+	/* Copy the station address into the dev structure. */
+	dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
+	if (dev->data->mac_addrs == NULL) {
+		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+			RTE_ETHER_ADDR_LEN);
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	/*
+	 * Set the default MAC address
+	 */
+	enetfec_set_mac_address(dev, &macaddr);
+
+	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
 	rc = enetfec_eth_init(dev);
 	if (rc)
 		goto failed_init;
@@ -482,6 +650,8 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 
 failed_init:
 	ENETFEC_PMD_ERR("Failed to init");
+err:
+	rte_eth_dev_release_port(dev);
 	return rc;
 }
 
@@ -489,6 +659,8 @@ static int
 pmd_enetfec_remove(struct rte_vdev_device *vdev)
 {
 	struct rte_eth_dev *eth_dev = NULL;
+	struct enetfec_private *fep;
+	struct enetfec_priv_rx_q *rxq;
 	int ret;
 
 	/* find the ethdev entry */
@@ -496,11 +668,22 @@ pmd_enetfec_remove(struct rte_vdev_device *vdev)
 	if (eth_dev == NULL)
 		return -ENODEV;
 
+	fep = eth_dev->data->dev_private;
+	/* Free descriptor base of first RX queue as it was configured
+	 * first in enetfec_eth_init().
+	 */
+	rxq = fep->rx_queues[0];
+	rte_free(rxq->bd.base);
+	enet_free_queue(eth_dev);
+	enetfec_eth_stop(eth_dev);
+
 	ret = rte_eth_dev_release_port(eth_dev);
 	if (ret != 0)
 		return -EINVAL;
 
 	ENETFEC_PMD_INFO("Release enetfec sw device");
+	enetfec_cleanup(fep);
+
 	return 0;
 }
 
diff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h
index babc7190fb..c6f8cf7f03 100644
--- a/drivers/net/enetfec/enet_ethdev.h
+++ b/drivers/net/enetfec/enet_ethdev.h
@@ -7,6 +7,10 @@
 
 #include <rte_ethdev.h>
 
+#define BD_LEN			49152
+#define ENETFEC_TX_FR_SIZE	2048
+#define ETH_HLEN		RTE_ETHER_HDR_LEN
+
 /* full duplex */
 #define FULL_DUPLEX		0x00
 
@@ -17,6 +21,20 @@
 #define ENETFEC_MAX_RX_PKT_LEN	3000
 
 #define __iomem
+#if defined(RTE_ARCH_ARM)
+#if defined(RTE_ARCH_64)
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+
+#else /* RTE_ARCH_32 */
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
+
+#else
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
 
 /*
  * ENETFEC can support 1 rx and tx queue.
 */
@@ -128,4 +146,9 @@ enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)
 	return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
 }
 
+uint16_t enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+uint16_t enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
 #endif /*__ENETFEC_ETHDEV_H__*/
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644
index 0000000000..4e6a263e67
--- /dev/null
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include <rte_byteorder.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
+/* This function performs the Rx queue processing: it dequeues packets
+ * from the ring and, as it walks the ring, sets the empty indicator on
+ * each processed descriptor so the hardware can reuse it.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct rte_mempool *pool;
+	struct bufdesc *bdp;
+	struct rte_mbuf *mbuf, *new_mbuf = NULL;
+	unsigned short status;
+	unsigned short pkt_len;
+	int pkt_received = 0, index = 0;
+	void *data;
+	struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
+	struct rte_eth_stats *stats = &rxq->fep->stats;
+	pool = rxq->pool;
+	bdp = rxq->bd.cur;
+
+	/* Process the incoming packet */
+	status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	while ((status & RX_BD_EMPTY) == 0) {
+		if (pkt_received >= nb_pkts)
+			break;
+
+		new_mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(new_mbuf == NULL)) {
+			stats->ierrors++;
+			break;
+		}
+		/* Check for errors. */
+		status ^= RX_BD_LAST;
+		if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+			RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+			RX_BD_TR)) {
+			stats->ierrors++;
+			if (status & RX_BD_OV) {
+				/* FIFO overrun */
+				/* enet_dump_rx(rxq); */
+				ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
+				goto rx_processing_done;
+			}
+			if (status & (RX_BD_LG | RX_BD_SH
+						| RX_BD_LAST)) {
+				/* Frame too long or too short. */
+				ENETFEC_DP_LOG(DEBUG, "rx_length_error");
+				if (status & RX_BD_LAST)
+					ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
+			}
+			if (status & RX_BD_CR) {	/* CRC Error */
+				ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
+			}
+			/* Report late collisions as a frame error.
+			 */
+			if (status & (RX_BD_NO | RX_BD_TR))
+				ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
+			goto rx_processing_done;
+		}
+
+		/* Process the incoming frame. */
+		stats->ipackets++;
+		pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+		stats->ibytes += pkt_len;
+
+		/* Data starts at the mbuf's data_off offset. */
+		index = enet_get_bd_index(bdp, &rxq->bd);
+		mbuf = rxq->rx_mbuf[index];
+
+		data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+		rte_prefetch0(data);
+		rte_pktmbuf_append((struct rte_mbuf *)mbuf,
+				pkt_len - 4);	/* strip the 4-byte FCS */
+
+		if (rxq->fep->quirks & QUIRK_RACC)
+			data = rte_pktmbuf_adj(mbuf, 2);
+
+		rx_pkts[pkt_received] = mbuf;
+		pkt_received++;
+		rxq->rx_mbuf[index] = new_mbuf;
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+				&bdp->bd_bufaddr);
+rx_processing_done:
+		/* When rx_processing_done, clear the status flags
+		 * for this buffer.
+		 */
+		status &= ~RX_BD_STATS;
+
+		/* Mark the buffer empty */
+		status |= RX_BD_EMPTY;
+
+		if (rxq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+				&ebdp->bd_esc);
+			rte_write32(0, &ebdp->bd_prot);
+			rte_write32(0, &ebdp->bd_bdu);
+		}
+
+		/* Make sure the updates to rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Update BD pointer to next entry */
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.
+		 */
+		rte_write32(0, rxq->bd.active_reg_desc);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+	}
+	rxq->bd.cur = bdp;
+	return pkt_received;
+}
+
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct enetfec_priv_tx_q *txq =
+			(struct enetfec_priv_tx_q *)tx_queue;
+	struct rte_eth_stats *stats = &txq->fep->stats;
+	struct bufdesc *bdp, *last_bdp;
+	struct rte_mbuf *mbuf;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned int index, estatus = 0;
+	unsigned int i, pkt_transmitted = 0;
+	uint8_t *data;
+	int tx_st = 1;
+
+	while (tx_st) {
+		if (pkt_transmitted >= nb_pkts) {
+			tx_st = 0;
+			break;
+		}
+		bdp = txq->bd.cur;
+		/* First clean the ring */
+		index = enet_get_bd_index(bdp, &txq->bd);
+		status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+		if (status & TX_BD_READY) {
+			stats->oerrors++;
+			break;
+		}
+		if (txq->tx_mbuf[index]) {
+			rte_pktmbuf_free(txq->tx_mbuf[index]);
+			txq->tx_mbuf[index] = NULL;
+		}
+
+		mbuf = *(tx_pkts);
+		tx_pkts++;
+
+		if (mbuf->nb_segs > 1) {
+			ENETFEC_PMD_DEBUG("SG not supported");
+			return pkt_transmitted;
+		}
+
+		/* Fill in a Tx ring entry */
+		last_bdp = bdp;
+		status &= ~TX_BD_STATS;
+
+		/* Set buffer length and buffer pointer */
+		buflen = rte_pktmbuf_pkt_len(mbuf);
+		stats->opackets++;
+		stats->obytes += buflen;
+
+		status |= (TX_BD_LAST);
+		data = rte_pktmbuf_mtod(mbuf, void *);
+		for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+			dcbf(data + i);
+
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			&bdp->bd_bufaddr);
+		rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+		if (txq->fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			rte_write32(0, &ebdp->bd_bdu);
+			rte_write32(rte_cpu_to_le_32(estatus),
+				&ebdp->bd_esc);
+		}
+
+		index = enet_get_bd_index(last_bdp, &txq->bd);
+		/* Save mbuf pointer */
+		txq->tx_mbuf[index] = mbuf;
+
+		/* Make sure the updates to rest of the descriptor are performed
+		 * before transferring ownership.
+		 */
+		status |= (TX_BD_READY | TX_BD_TC);
+		rte_wmb();
+		rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+		/* Trigger transmission start */
+		rte_write32(0, txq->bd.active_reg_desc);
+		pkt_transmitted++;
+
+		/* If this was the last BD in the ring, start at the
+		 * beginning again.
+		 */
+		bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+		/* Make sure the updates to bdp and tx_mbuf are performed
+		 * before txq->bd.cur.
+		 */
+		txq->bd.cur = bdp;
+	}
+	return pkt_transmitted;
+}
diff --git a/drivers/net/enetfec/meson.build b/drivers/net/enetfec/meson.build
index 57f316b8a5..79dca58dea 100644
--- a/drivers/net/enetfec/meson.build
+++ b/drivers/net/enetfec/meson.build
@@ -7,4 +7,5 @@ if not is_linux
 endif
 
 sources = files('enet_ethdev.c',
-		'enet_uio.c')
+		'enet_uio.c',
+		'enet_rxtx.c')
-- 
2.17.1
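As background for the rte_wmb() placement in both burst functions: the MAC
and the driver share each buffer descriptor through a single ownership bit
(RX_BD_EMPTY on Rx, TX_BD_READY on Tx), so every other descriptor field
must be globally visible before that bit flips. A minimal sketch of the
handoff, using hypothetical stand-in names (demo_bd, DEMO_BD_READY,
demo_bd_give_to_hw) rather than the driver's real structures:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_io.h>

/* Simplified stand-in for struct bufdesc, for illustration only. */
struct demo_bd {
	uint16_t bd_sc;      /* status/control; holds the ownership bit */
	uint16_t bd_datlen;  /* buffer length */
	uint32_t bd_bufaddr; /* IOVA of the data buffer */
};

#define DEMO_BD_READY 0x8000 /* stand-in for TX_BD_READY / RX_BD_EMPTY */

/* Hand one filled descriptor to the hardware. All field stores must be
 * ordered before the status store, hence the rte_wmb().
 */
static void
demo_bd_give_to_hw(struct demo_bd *bdp, uint32_t iova, uint16_t len,
		void *active_reg)
{
	rte_write32(rte_cpu_to_le_32(iova), &bdp->bd_bufaddr);
	rte_write16(rte_cpu_to_le_16(len), &bdp->bd_datlen);

	rte_wmb(); /* order the field stores before the ownership flip */
	rte_write16(rte_cpu_to_le_16(DEMO_BD_READY), &bdp->bd_sc);

	/* Poke the "descriptor active" register so the MAC rescans the ring. */
	rte_write32(0, active_reg);
}

The same ordering argument explains why the Rx path writes bd_bufaddr and
the extended-descriptor fields first and only then sets RX_BD_EMPTY.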