From: Apeksha Gupta <apeksha.gupta@nxp.com>
To: stephen@networkplumber.org, ferruh.yigit@intel.com
Cc: david.marchand@redhat.com, andrew.rybchenko@oktetlabs.ru, dev@dpdk.org,
 sachin.saxena@nxp.com, hemant.agrawal@nxp.com,
 Apeksha Gupta <apeksha.gupta@nxp.com>
Subject: [PATCH v11 4/5] net/enetfec: add Rx/Tx support
Date: Mon, 15 Nov 2021 12:49:39 +0530
Message-Id: <20211115071940.12942-5-apeksha.gupta@nxp.com>
In-Reply-To: <20211115071940.12942-1-apeksha.gupta@nxp.com>
References: <20211113043141.18888-2-apeksha.gupta@nxp.com>
 <20211115071940.12942-1-apeksha.gupta@nxp.com>

This patch adds burst enqueue and dequeue operations to the enetfec
PMD. It also adds basic features such as promiscuous mode enable and
basic statistics.

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 doc/guides/nics/enetfec.rst          |   2 +
 doc/guides/nics/features/enetfec.ini |   2 +
 drivers/net/enetfec/enet_ethdev.c    | 182 ++++++++++++++++++++++
 drivers/net/enetfec/enet_ethdev.h    |  25 +++
 drivers/net/enetfec/enet_rxtx.c      | 220 +++++++++++++++++++++++++++
 drivers/net/enetfec/meson.build      |   4 +-
 6 files changed, 434 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/enetfec/enet_rxtx.c

diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst
index 6a86295e34..209073e77c 100644
--- a/doc/guides/nics/enetfec.rst
+++ b/doc/guides/nics/enetfec.rst
@@ -84,6 +84,8 @@ driver.
 ENETFEC Features
 ~~~~~~~~~~~~~~~~~
 
+- Basic stats
+- Promiscuous
 - Linux
 - ARMv8
 
diff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini
index bdfbdbd9d4..3d8aa5b627 100644
--- a/doc/guides/nics/features/enetfec.ini
+++ b/doc/guides/nics/features/enetfec.ini
@@ -4,6 +4,8 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Promiscuous mode     = Y
+Basic stats          = Y
 Linux                = Y
 ARMv8                = Y
 Usage doc            = Y

diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index 0b8b73615d..9ac7501043 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -39,6 +39,8 @@
 #define ENETFEC_RAFL_V          0x8
 #define ENETFEC_OPD_V           0xFFF0
 
+/* Extended buffer descriptor */
+#define ENETFEC_EXTENDED_BD     0
 #define NUM_OF_BD_QUEUES        6
 
 /* Supported Rx offloads */
@@ -152,6 +154,38 @@ enetfec_restart(struct rte_eth_dev *dev)
         rte_delay_us(10);
 }
 
+static void
+enet_free_buffers(struct rte_eth_dev *dev)
+{
+        struct enetfec_private *fep = dev->data->dev_private;
+        unsigned int i, q;
+        struct rte_mbuf *mbuf;
+        struct bufdesc *bdp;
+        struct enetfec_priv_rx_q *rxq;
+        struct enetfec_priv_tx_q *txq;
+
+        for (q = 0; q < dev->data->nb_rx_queues; q++) {
+                rxq = fep->rx_queues[q];
+                bdp = rxq->bd.base;
+                for (i = 0; i < rxq->bd.ring_size; i++) {
+                        mbuf = rxq->rx_mbuf[i];
+                        rxq->rx_mbuf[i] = NULL;
+                        rte_pktmbuf_free(mbuf);
+                        bdp = enet_get_nextdesc(bdp, &rxq->bd);
+                }
+        }
+
+        for (q = 0; q < dev->data->nb_tx_queues; q++) {
+                txq = fep->tx_queues[q];
+                bdp = txq->bd.base;
+                for (i = 0; i < txq->bd.ring_size; i++) {
+                        mbuf = txq->tx_mbuf[i];
+                        txq->tx_mbuf[i] = NULL;
+                        rte_pktmbuf_free(mbuf);
+                }
+        }
+}
+
 static int
 enetfec_eth_configure(struct rte_eth_dev *dev)
 {
@@ -165,6 +199,8 @@ static int
 enetfec_eth_start(struct rte_eth_dev *dev)
 {
         enetfec_restart(dev);
+        dev->rx_pkt_burst = &enetfec_recv_pkts;
+        dev->tx_pkt_burst = &enetfec_xmit_pkts;
 
         return 0;
 }
@@ -191,6 +227,101 @@ enetfec_eth_stop(struct rte_eth_dev *dev)
         return 0;
 }
 
+static int
+enetfec_eth_close(struct rte_eth_dev *dev)
+{
+        enet_free_buffers(dev);
+        return 0;
+}
+
+static int
+enetfec_eth_link_update(struct rte_eth_dev *dev,
+                        int wait_to_complete __rte_unused)
+{
+        struct rte_eth_link link;
+        unsigned int lstatus = 1;
+
+        memset(&link, 0, sizeof(struct rte_eth_link));
+
+        link.link_status = lstatus;
+        link.link_speed = RTE_ETH_SPEED_NUM_1G;
+
+        ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+                         "Up");
+
+        return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+enetfec_promiscuous_enable(struct rte_eth_dev *dev)
+{
+        struct enetfec_private *fep = dev->data->dev_private;
+        uint32_t tmp;
+
+        tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+        tmp |= 0x8;
+        tmp &= ~0x2;
+        rte_write32(rte_cpu_to_le_32(tmp),
+                    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+
+        return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+        struct enetfec_private *fep = dev->data->dev_private;
+
+        rte_write32(rte_cpu_to_le_32(0xffffffff),
+                    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+        rte_write32(rte_cpu_to_le_32(0xffffffff),
+                    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+        dev->data->all_multicast = 1;
+
+        rte_write32(rte_cpu_to_le_32(0x04400002),
+                    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+        rte_write32(rte_cpu_to_le_32(0x10800049),
+                    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+
+        return 0;
+}
+
+/* Program a new MAC station address into the hardware.
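+ * The station address lives in the ENETFEC_PALR/ENETFEC_PAUR registers,
+ * packed high byte first, which is why the address bytes are shifted
+ * into 32-bit words below.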
+ */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+                        struct rte_ether_addr *addr)
+{
+        struct enetfec_private *fep = dev->data->dev_private;
+
+        writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+               (addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+               (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
+        writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+               (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
+
+        rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+        return 0;
+}
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+                  struct rte_eth_stats *stats)
+{
+        struct enetfec_private *fep = dev->data->dev_private;
+        struct rte_eth_stats *eth_stats = &fep->stats;
+
+        stats->ipackets = eth_stats->ipackets;
+        stats->ibytes = eth_stats->ibytes;
+        stats->ierrors = eth_stats->ierrors;
+        stats->opackets = eth_stats->opackets;
+        stats->obytes = eth_stats->obytes;
+        stats->oerrors = eth_stats->oerrors;
+        stats->rx_nombuf = eth_stats->rx_nombuf;
+
+        return 0;
+}
+
 static int
 enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
         struct rte_eth_dev_info *dev_info)
@@ -202,6 +333,18 @@ enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
         return 0;
 }
 
+static void
+enet_free_queue(struct rte_eth_dev *dev)
+{
+        struct enetfec_private *fep = dev->data->dev_private;
+        unsigned int i;
+
+        for (i = 0; i < dev->data->nb_rx_queues; i++)
+                rte_free(fep->rx_queues[i]);
+        for (i = 0; i < dev->data->nb_tx_queues; i++)
+                rte_free(fep->tx_queues[i]);
+}
+
 static const unsigned short offset_des_active_rxq[] = {
         ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
 };
@@ -405,6 +548,12 @@ static const struct eth_dev_ops enetfec_ops = {
         .dev_configure = enetfec_eth_configure,
         .dev_start = enetfec_eth_start,
         .dev_stop = enetfec_eth_stop,
+        .dev_close = enetfec_eth_close,
+        .link_update = enetfec_eth_link_update,
+        .promiscuous_enable = enetfec_promiscuous_enable,
+        .allmulticast_enable = enetfec_multicast_enable,
+        .mac_addr_set = enetfec_set_mac_address,
+        .stats_get = enetfec_stats_get,
         .dev_infos_get = enetfec_eth_info,
         .rx_queue_setup = enetfec_rx_queue_setup,
         .tx_queue_setup = enetfec_tx_queue_setup
@@ -430,6 +579,9 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
         int rc;
         int i;
         unsigned int bdsize;
+        struct rte_ether_addr macaddr = {
+                .addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
+        };
 
         name = rte_vdev_device_name(vdev);
         ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);
@@ -470,6 +622,21 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
                 fep->bd_addr_p = fep->bd_addr_p + bdsize;
         }
 
+        /* Copy the station address into the dev structure. */
+        dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
+        if (dev->data->mac_addrs == NULL) {
+                ENETFEC_PMD_ERR("Failed to allocate %d bytes to store MAC addresses",
+                                RTE_ETHER_ADDR_LEN);
+                rc = -ENOMEM;
+                goto err;
+        }
+
+        /* Set the default MAC address. */
+        enetfec_set_mac_address(dev, &macaddr);
+
+        fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
         rc = enetfec_eth_init(dev);
         if (rc)
                 goto failed_init;
@@ -478,6 +645,8 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
 
 failed_init:
         ENETFEC_PMD_ERR("Failed to init");
+err:
+        rte_eth_dev_release_port(dev);
         return rc;
 }
 
@@ -485,6 +654,8 @@ static int
 pmd_enetfec_remove(struct rte_vdev_device *vdev)
 {
         struct rte_eth_dev *eth_dev = NULL;
+        struct enetfec_private *fep;
+        struct enetfec_priv_rx_q *rxq;
         int ret;
 
         /* find the ethdev entry */
@@ -492,11 +663,22 @@ pmd_enetfec_remove(struct rte_vdev_device *vdev)
         if (eth_dev == NULL)
                 return -ENODEV;
 
+        fep = eth_dev->data->dev_private;
+        /* Free the descriptor base of the first Rx queue as it was
+         * configured first in enetfec_eth_init().
+         */
+        rxq = fep->rx_queues[0];
+        rte_free(rxq->bd.base);
+        enet_free_queue(eth_dev);
+        enetfec_eth_stop(eth_dev);
+
         ret = rte_eth_dev_release_port(eth_dev);
         if (ret != 0)
                 return -EINVAL;
 
         ENETFEC_PMD_INFO("Release enetfec sw device");
+        enetfec_cleanup(fep);
+
         return 0;
 }
 
diff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h
index 27e124c339..06a6c10600 100644
--- a/drivers/net/enetfec/enet_ethdev.h
+++ b/drivers/net/enetfec/enet_ethdev.h
@@ -7,6 +7,10 @@
 
 #include <rte_ethdev.h>
 
+#define BD_LEN                  49152
+#define ENETFEC_TX_FR_SIZE      2048
+#define ETH_HLEN                RTE_ETHER_HDR_LEN
+
 /* full duplex */
 #define FULL_DUPLEX             0x00
 
@@ -17,6 +21,21 @@
 #define ENETFEC_MAX_RX_PKT_LEN  3000
 
 #define __iomem
+#if defined(RTE_ARCH_ARM)
+#if defined(RTE_ARCH_64)
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dcbf_64(p) dcbf(p)
+
+#else /* RTE_ARCH_32 */
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
+
+#else
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#endif
+
 /*
  * ENETFEC can support 1 rx and tx queue..
  */
@@ -71,6 +90,7 @@ struct enetfec_priv_rx_q {
 
 struct enetfec_private {
         struct rte_eth_dev *dev;
+        struct rte_eth_stats stats;
         int full_duplex;
         int flag_pause;
         uint32_t quirks;
@@ -123,4 +143,9 @@ enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)
         return ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;
 }
 
+uint16_t enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+                uint16_t nb_pkts);
+uint16_t enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                uint16_t nb_pkts);
+
 #endif /*__ENETFEC_ETHDEV_H__*/
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644
index 0000000000..e61a217dcb
--- /dev/null
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include <rte_prefetch.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
+/* Rx queue processing: dequeue received packets from the Rx ring and,
+ * while walking it, mark each consumed descriptor empty again.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+                uint16_t nb_pkts)
+{
+        struct rte_mempool *pool;
+        struct bufdesc *bdp;
+        struct rte_mbuf *mbuf, *new_mbuf = NULL;
+        unsigned short status;
+        unsigned short pkt_len;
+        int pkt_received = 0, index = 0;
+        void *data;
+        struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
+        struct rte_eth_stats *stats = &rxq->fep->stats;
+        pool = rxq->pool;
+        bdp = rxq->bd.cur;
+
+        /* Process the incoming packet */
+        status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+        while ((status & RX_BD_EMPTY) == 0) {
+                if (pkt_received >= nb_pkts)
+                        break;
+
+                new_mbuf = rte_pktmbuf_alloc(pool);
+                if (unlikely(new_mbuf == NULL)) {
+                        stats->rx_nombuf++;
+                        break;
+                }
+                /* Check for errors. */
+                status ^= RX_BD_LAST;
+                if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+                                RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+                                RX_BD_TR)) {
+                        stats->ierrors++;
+                        if (status & RX_BD_OV) {
+                                /* FIFO overrun */
+                                /* enet_dump_rx(rxq); */
+                                ENETFEC_DP_LOG(DEBUG, "rx_fifo_error");
+                                goto rx_processing_done;
+                        }
+                        if (status & (RX_BD_LG | RX_BD_SH
+                                                | RX_BD_LAST)) {
+                                /* Frame too long or too short.
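+                                 * RX_BD_LG flags an over-length frame and
+                                 * RX_BD_SH an undersized one; both were
+                                 * counted in ierrors above.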
+                                 */
+                                ENETFEC_DP_LOG(DEBUG, "rx_length_error");
+                                if (status & RX_BD_LAST)
+                                        ENETFEC_DP_LOG(DEBUG, "rcv is not +last");
+                        }
+                        if (status & RX_BD_CR) {        /* CRC Error */
+                                ENETFEC_DP_LOG(DEBUG, "rx_crc_errors");
+                        }
+                        /* Report late collisions as a frame error. */
+                        if (status & (RX_BD_NO | RX_BD_TR))
+                                ENETFEC_DP_LOG(DEBUG, "rx_frame_error");
+                        goto rx_processing_done;
+                }
+
+                /* Process the incoming frame. */
+                stats->ipackets++;
+                pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+                stats->ibytes += pkt_len;
+
+                /* Data starts at the mbuf's data_off offset. */
+                index = enet_get_bd_index(bdp, &rxq->bd);
+                mbuf = rxq->rx_mbuf[index];
+
+                data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+                rte_prefetch0(data);
+                /* Exclude the 4-byte FCS from the packet length. */
+                rte_pktmbuf_append((struct rte_mbuf *)mbuf,
+                                pkt_len - 4);
+
+                if (rxq->fep->quirks & QUIRK_RACC)
+                        data = rte_pktmbuf_adj(mbuf, 2);
+
+                rx_pkts[pkt_received] = mbuf;
+                pkt_received++;
+                rxq->rx_mbuf[index] = new_mbuf;
+                rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+                                &bdp->bd_bufaddr);
+rx_processing_done:
+                /* On rx_processing_done, clear the status flags
+                 * for this buffer.
+                 */
+                status &= ~RX_BD_STATS;
+
+                /* Mark the buffer empty */
+                status |= RX_BD_EMPTY;
+
+                if (rxq->fep->bufdesc_ex) {
+                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+                        rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+                                    &ebdp->bd_esc);
+                        rte_write32(0, &ebdp->bd_prot);
+                        rte_write32(0, &ebdp->bd_bdu);
+                }
+
+                /* Make sure the updates to the rest of the descriptor are
+                 * performed before transferring ownership.
+                 */
+                rte_wmb();
+                rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+                /* Update BD pointer to next entry */
+                bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+                /* Doing this here will keep the FEC running while we process
+                 * incoming frames.
+                 */
+                rte_write32(0, rxq->bd.active_reg_desc);
+                status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+        }
+        rxq->bd.cur = bdp;
+        return pkt_received;
+}
+
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+        struct enetfec_priv_tx_q *txq =
+                        (struct enetfec_priv_tx_q *)tx_queue;
+        struct rte_eth_stats *stats = &txq->fep->stats;
+        struct bufdesc *bdp, *last_bdp;
+        struct rte_mbuf *mbuf;
+        unsigned short status;
+        unsigned short buflen;
+        unsigned int index, estatus = 0;
+        unsigned int i, pkt_transmitted = 0;
+        uint8_t *data;
+        int tx_st = 1;
+
+        while (tx_st) {
+                if (pkt_transmitted >= nb_pkts) {
+                        tx_st = 0;
+                        break;
+                }
+                bdp = txq->bd.cur;
+                /* First clean the ring */
+                index = enet_get_bd_index(bdp, &txq->bd);
+                status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+                if (status & TX_BD_READY) {
+                        stats->oerrors++;
+                        break;
+                }
+                if (txq->tx_mbuf[index]) {
+                        rte_pktmbuf_free(txq->tx_mbuf[index]);
+                        txq->tx_mbuf[index] = NULL;
+                }
+
+                mbuf = *(tx_pkts);
+                tx_pkts++;
+
+                /* Scatter-gather is not supported; return the number of
+                 * packets actually queued instead of an error code.
+                 */
+                if (mbuf->nb_segs > 1) {
+                        ENETFEC_DP_LOG(DEBUG, "SG not supported");
+                        return pkt_transmitted;
+                }
+
+                /* Fill in a Tx ring entry */
+                last_bdp = bdp;
+                status &= ~TX_BD_STATS;
+
+                /* Set buffer length and buffer pointer */
+                buflen = rte_pktmbuf_pkt_len(mbuf);
+                stats->opackets++;
+                stats->obytes += buflen;
+
+                status |= (TX_BD_LAST);
+                data = rte_pktmbuf_mtod(mbuf, void *);
+                for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+                        dcbf(data + i);
+
+                rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+                            &bdp->bd_bufaddr);
+                rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+                if (txq->fep->bufdesc_ex) {
+                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+                        rte_write32(0, &ebdp->bd_bdu);
+                        rte_write32(rte_cpu_to_le_32(estatus),
+                                    &ebdp->bd_esc);
+                }
+
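+                /* Track the mbuf at the index of the frame's last BD so the
+                 * ring-cleaning step at the top of this loop can free it
+                 * once the hardware has released the descriptor.
+                 */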
+                index = enet_get_bd_index(last_bdp, &txq->bd);
+                /* Save mbuf pointer */
+                txq->tx_mbuf[index] = mbuf;
+
+                /* Make sure the updates to the rest of the descriptor are
+                 * performed before transferring ownership.
+                 */
+                status |= (TX_BD_READY | TX_BD_TC);
+                rte_wmb();
+                rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+                /* Trigger transmission start */
+                rte_write32(0, txq->bd.active_reg_desc);
+                pkt_transmitted++;
+
+                /* If this was the last BD in the ring, start at the
+                 * beginning again.
+                 */
+                bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+                /* Make sure the updates to bdp and tx_mbuf are performed
+                 * before txq->bd.cur.
+                 */
+                txq->bd.cur = bdp;
+        }
+        /* Return the number of packets actually queued on the ring. */
+        return pkt_transmitted;
+}
diff --git a/drivers/net/enetfec/meson.build b/drivers/net/enetfec/meson.build
index 3fb0f73071..551cd5358c 100644
--- a/drivers/net/enetfec/meson.build
+++ b/drivers/net/enetfec/meson.build
@@ -8,4 +8,6 @@ endif
 
 sources = files(
         'enet_ethdev.c',
-        'enet_uio.c')
+        'enet_uio.c',
+        'enet_rxtx.c'
+)
-- 
2.17.1
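
P.S. For anyone trying the new datapath: below is a minimal sketch of a
poll loop that exercises these burst hooks through the generic ethdev
API. It assumes port and queue setup (rte_eth_dev_configure,
rte_eth_rx_queue_setup, rte_eth_tx_queue_setup, rte_eth_dev_start) has
already been done on the enetfec vdev; BURST_SIZE and forward_loop are
illustrative names, not part of this patch.

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	#define BURST_SIZE 32	/* illustrative burst depth */

	static void
	forward_loop(uint16_t port_id)
	{
		struct rte_mbuf *pkts[BURST_SIZE];
		uint16_t nb_rx, nb_tx;

		for (;;) {
			/* Dequeue from Rx queue 0; this lands in
			 * enetfec_recv_pkts() once the port is started.
			 */
			nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
			if (nb_rx == 0)
				continue;

			/* Loop the packets back out through Tx queue 0
			 * (enetfec_xmit_pkts) and free whatever the ring
			 * could not accept.
			 */
			nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
			while (nb_tx < nb_rx)
				rte_pktmbuf_free(pkts[nb_tx++]);
		}
	}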