From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: niklas.soderlund@corigine.com,
Chaoyong He <chaoyong.he@corigine.com>,
Heinrich Kuhn <heinrich.kuhn@corigine.com>
Subject: [PATCH v5 09/12] net/nfp: add flower ctrl VNIC rxtx logic
Date: Fri, 5 Aug 2022 14:32:32 +0800 [thread overview]
Message-ID: <1659681155-16525-10-git-send-email-chaoyong.he@corigine.com> (raw)
In-Reply-To: <1659681155-16525-1-git-send-email-chaoyong.he@corigine.com>
Add a Rx and Tx function for the control vNIC. The logic is mostly
identical to the normal Rx and Tx functionality of the NFP PMD.
This commit also makes use of the ctrl vNIC service logic to
service the ctrl vNIC Rx path.
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Signed-off-by: Heinrich Kuhn <heinrich.kuhn@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
drivers/net/nfp/flower/nfp_flower.c | 15 ++
drivers/net/nfp/flower/nfp_flower.h | 15 ++
drivers/net/nfp/flower/nfp_flower_ctrl.c | 252 +++++++++++++++++++++++++++++++
drivers/net/nfp/flower/nfp_flower_ctrl.h | 13 ++
drivers/net/nfp/meson.build | 1 +
5 files changed, 296 insertions(+)
create mode 100644 drivers/net/nfp/flower/nfp_flower_ctrl.c
create mode 100644 drivers/net/nfp/flower/nfp_flower_ctrl.h
diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 51df504..5e9c4ef 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -21,6 +21,7 @@
#include "../nfpcore/nfp_nsp.h"
#include "nfp_flower.h"
#include "nfp_flower_ovs_compat.h"
+#include "nfp_flower_ctrl.h"
#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 512
@@ -216,7 +217,21 @@
.link_update = nfp_flower_pf_link_update,
};
+/*
+ * rte_service callback for the flower ctrl vNIC.
+ * @arg is the struct nfp_app_flower registered with the service core;
+ * each invocation runs one poll of the ctrl vNIC Rx path.
+ */
+static int
+nfp_flower_ctrl_vnic_service(void *arg)
+{
+	nfp_flower_ctrl_vnic_poll((struct nfp_app_flower *)arg);
+
+	return 0;
+}
+
static struct rte_service_spec flower_services[NFP_FLOWER_SERVICE_MAX] = {
+ [NFP_FLOWER_SERVICE_CTRL] = {
+ .name = "flower_ctrl_vnic_service",
+ .callback = nfp_flower_ctrl_vnic_service,
+ },
};
static int
diff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h
index f11ef6d..bdc64e3 100644
--- a/drivers/net/nfp/flower/nfp_flower.h
+++ b/drivers/net/nfp/flower/nfp_flower.h
@@ -7,9 +7,18 @@
#define _NFP_FLOWER_H_
enum nfp_flower_service {
+ NFP_FLOWER_SERVICE_CTRL,
NFP_FLOWER_SERVICE_MAX
};
+/*
+ * Flower fallback and ctrl path always adds and removes
+ * 8 bytes of prepended data. Tx descriptors must point
+ * to the correct packet data offset after metadata has
+ * been added
+ */
+#define FLOWER_PKT_DATA_OFFSET 8
+
/* The flower application's private structure */
struct nfp_app_flower {
/* List of rte_service ID's for the flower app */
@@ -29,6 +38,12 @@ struct nfp_app_flower {
/* the eth table as reported by firmware */
struct nfp_eth_table *nfp_eth_table;
+
+ /* Ctrl vNIC Rx counter */
+ uint64_t ctrl_vnic_rx_count;
+
+ /* Ctrl vNIC Tx counter */
+ uint64_t ctrl_vnic_tx_count;
};
int nfp_init_app_flower(struct nfp_pf_dev *pf_dev);
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c
new file mode 100644
index 0000000..e73054e
--- /dev/null
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include <rte_common.h>
+#include <ethdev_pci.h>
+
+#include "../nfp_common.h"
+#include "../nfp_logs.h"
+#include "../nfp_ctrl.h"
+#include "../nfp_rxtx.h"
+#include "nfp_flower.h"
+#include "nfp_flower_ctrl.h"
+
+#define MAX_PKT_BURST 32
+
+/*
+ * Receive up to @nb_pkts control messages from the ctrl vNIC Rx ring.
+ *
+ * Mirrors the normal NFP PMD Rx path: for each descriptor with the DD
+ * bit set, a replacement mbuf is allocated up front so the free-list
+ * ring can be refilled immediately, and the hardware free-list write
+ * pointer is only advanced once rx_free_thresh descriptors have been
+ * accumulated (the remainder is carried in rxq->nb_rx_hold).
+ * Scatter Rx is not supported: every packet is a single segment.
+ *
+ * Returns the number of mbufs stored in @rx_pkts (0 on empty ring,
+ * bad queue, or mbuf allocation failure).
+ */
+static uint16_t
+nfp_flower_ctrl_vnic_recv(void *rx_queue,
+		struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	uint64_t dma_addr;
+	uint16_t avail = 0;
+	struct rte_mbuf *mb;
+	uint16_t nb_hold = 0;
+	struct nfp_net_hw *hw;
+	struct nfp_net_rxq *rxq;
+	struct rte_mbuf *new_mb;
+	struct nfp_net_rx_buff *rxb;
+	struct nfp_net_rx_desc *rxds;
+
+	rxq = rx_queue;
+	if (unlikely(rxq == NULL)) {
+		/*
+		 * DPDK just checks the queue is lower than max queues
+		 * enabled. But the queue needs to be configured
+		 */
+		PMD_RX_LOG(ERR, "RX Bad queue");
+		return 0;
+	}
+
+	hw = rxq->hw;
+	while (avail < nb_pkts) {
+		rxb = &rxq->rxbufs[rxq->rd_p];
+		if (unlikely(rxb == NULL)) {
+			PMD_RX_LOG(ERR, "rxb does not exist!");
+			break;
+		}
+
+		/* Descriptor not handed back by hardware yet: ring is drained */
+		rxds = &rxq->rxds[rxq->rd_p];
+		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+			break;
+
+		/*
+		 * Memory barrier to ensure that we won't do other
+		 * reads before the DD bit.
+		 */
+		rte_rmb();
+
+		/*
+		 * We got a packet. Let's alloc a new mbuf for refilling the
+		 * free descriptor ring as soon as possible
+		 */
+		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
+		if (unlikely(new_mb == NULL)) {
+			PMD_RX_LOG(ERR,
+				"RX mbuf alloc failed port_id=%u queue_id=%u",
+				rxq->port_id, (unsigned int)rxq->qidx);
+			nfp_net_mbuf_alloc_failed(rxq);
+			break;
+		}
+
+		nb_hold++;
+
+		/*
+		 * Grab the mbuf and refill the descriptor with the
+		 * previously allocated mbuf
+		 */
+		mb = rxb->mbuf;
+		rxb->mbuf = new_mb;
+
+		/* Size of this segment */
+		mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+		/* Size of the whole packet. We just support 1 segment */
+		mb->pkt_len = mb->data_len;
+
+		if (unlikely((mb->data_len + hw->rx_offset) > rxq->mbuf_size)) {
+			rte_pktmbuf_free(mb);
+			/*
+			 * This should not happen and the user has the
+			 * responsibility of avoiding it. But we have
+			 * to give some info about the error
+			 */
+			RTE_LOG_DP(ERR, PMD,
+				"mbuf overflow likely due to the RX offset.\n"
+				"\t\tYour mbuf size should have extra space for"
+				" RX offset=%u bytes.\n"
+				"\t\tCurrently you just have %u bytes available"
+				" but the received packet is %u bytes long",
+				hw->rx_offset,
+				rxq->mbuf_size - hw->rx_offset,
+				mb->data_len);
+			break;
+		}
+
+		/* Filling the received mbuf with packet info */
+		if (hw->rx_offset)
+			mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
+		else
+			mb->data_off = RTE_PKTMBUF_HEADROOM + NFP_DESC_META_LEN(rxds);
+
+		/* No scatter mode supported */
+		mb->nb_segs = 1;
+		mb->next = NULL;
+		mb->port = rxq->port_id;
+
+		rx_pkts[avail++] = mb;
+
+		/* Now resetting and updating the descriptor */
+		rxds->vals[0] = 0;
+		rxds->vals[1] = 0;
+		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
+		rxds->fld.dd = 0;
+		rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
+		rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
+
+		rxq->rd_p++;
+		if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
+			rxq->rd_p = 0;
+	}
+
+	if (nb_hold == 0)
+		return 0;
+
+	/* Include descriptors held over from the previous call */
+	nb_hold += rxq->nb_rx_hold;
+
+	/*
+	 * FL descriptors needs to be written before incrementing the
+	 * FL queue WR pointer
+	 */
+	rte_wmb();
+	if (nb_hold >= rxq->rx_free_thresh) {
+		PMD_RX_LOG(DEBUG, "port=%hu queue=%d nb_hold=%hu avail=%hu",
+			rxq->port_id, rxq->qidx, nb_hold, avail);
+		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
+		nb_hold = 0;
+	}
+
+	/* Carry any descriptors not yet reported to hardware */
+	rxq->nb_rx_hold = nb_hold;
+
+	return avail;
+}
+
+/*
+ * Send one control message mbuf on the ctrl vNIC Tx queue (queue 0).
+ *
+ * On success the mbuf is owned by the Tx ring and freed once the
+ * descriptor is recycled. On failure nothing is queued and the caller
+ * keeps ownership of @mbuf.
+ *
+ * Returns the number of packets queued: 1 on success, 0 on failure.
+ */
+uint16_t
+nfp_flower_ctrl_vnic_xmit(struct nfp_app_flower *app_flower,
+		struct rte_mbuf *mbuf)
+{
+	uint16_t cnt = 0;
+	uint64_t dma_addr;
+	uint32_t free_descs;
+	struct rte_mbuf **lmbuf;
+	struct nfp_net_txq *txq;
+	struct nfp_net_hw *ctrl_hw;
+	struct rte_eth_dev *ctrl_dev;
+	struct nfp_net_nfd3_tx_desc *txds;
+
+	ctrl_hw = app_flower->ctrl_hw;
+	ctrl_dev = ctrl_hw->eth_dev;
+
+	/* Flower ctrl vNIC only has a single tx queue */
+	txq = ctrl_dev->data->tx_queues[0];
+	if (unlikely(txq == NULL)) {
+		/*
+		 * DPDK just checks the queue is lower than max queues
+		 * enabled. But the queue needs to be configured
+		 */
+		PMD_TX_LOG(ERR, "ctrl dev TX Bad queue");
+		goto xmit_end;
+	}
+
+	txds = &txq->txds[txq->wr_p];
+	txds->vals[0] = 0;
+	txds->vals[1] = 0;
+	txds->vals[2] = 0;
+	txds->vals[3] = 0;
+
+	/* Reclaim completed descriptors before declaring the ring full */
+	if (nfp_net_nfd3_txq_full(txq))
+		nfp_net_tx_free_bufs(txq);
+
+	free_descs = nfp_net_nfd3_free_tx_desc(txq);
+	if (unlikely(free_descs == 0)) {
+		PMD_TX_LOG(ERR, "ctrl dev no free descs");
+		goto xmit_end;
+	}
+
+	/* Free any mbuf still attached to this ring slot from a past send */
+	lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+	RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
+	if (*lmbuf)
+		rte_pktmbuf_free_seg(*lmbuf);
+
+	*lmbuf = mbuf;
+	dma_addr = rte_mbuf_data_iova(mbuf);
+
+	txds->data_len = mbuf->pkt_len;
+	txds->dma_len = txds->data_len;
+	txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
+	txds->dma_addr_lo = (dma_addr & 0xffffffff);
+	/* Single-descriptor packet carrying 8B of prepended metadata */
+	txds->offset_eop = FLOWER_PKT_DATA_OFFSET | PCIE_DESC_TX_EOP;
+
+	txq->wr_p++;
+	if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
+		txq->wr_p = 0;
+
+	cnt++;
+	app_flower->ctrl_vnic_tx_count++;
+
+	/*
+	 * The descriptor writes must be visible before the queue write
+	 * pointer is advanced. Only notify hardware on the success path:
+	 * bumping the pointer when txq is NULL dereferences a NULL
+	 * pointer, and bumping it with no descriptor written corrupts
+	 * the queue state.
+	 */
+	rte_wmb();
+	nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, 1);
+
+xmit_end:
+	return cnt;
+}
+
+/*
+ * Poll the ctrl vNIC Rx queue once, count the received control
+ * messages and release them. Real cmsg processing is added later;
+ * for now the mbufs are simply freed.
+ */
+void
+nfp_flower_ctrl_vnic_poll(struct nfp_app_flower *app_flower)
+{
+	uint16_t idx;
+	uint16_t nb_rx;
+	struct nfp_net_rxq *ctrl_rxq;
+	struct rte_eth_dev *ctrl_eth_dev;
+	struct rte_mbuf *mbufs[MAX_PKT_BURST];
+
+	ctrl_eth_dev = app_flower->ctrl_hw->eth_dev;
+
+	/* ctrl vNIC only has a single Rx queue */
+	ctrl_rxq = ctrl_eth_dev->data->rx_queues[0];
+	nb_rx = nfp_flower_ctrl_vnic_recv(ctrl_rxq, mbufs, MAX_PKT_BURST);
+	/* Defensive only: recv never returns more than it was asked for */
+	if (nb_rx > MAX_PKT_BURST) {
+		PMD_RX_LOG(ERR, "nfp_net_ctrl_vnic_recv failed!");
+		return;
+	}
+
+	if (nb_rx == 0)
+		return;
+
+	app_flower->ctrl_vnic_rx_count += nb_rx;
+	/* Process cmsgs here, only free for now */
+	for (idx = 0; idx < nb_rx; idx++)
+		rte_pktmbuf_free(mbufs[idx]);
+}
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.h b/drivers/net/nfp/flower/nfp_flower_ctrl.h
new file mode 100644
index 0000000..74765c9
--- /dev/null
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _NFP_FLOWER_CTRL_H_
+#define _NFP_FLOWER_CTRL_H_
+
+void nfp_flower_ctrl_vnic_poll(struct nfp_app_flower *app_flower);
+uint16_t nfp_flower_ctrl_vnic_xmit(struct nfp_app_flower *app_flower,
+ struct rte_mbuf *mbuf);
+
+#endif /* _NFP_FLOWER_CTRL_H_ */
diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
index 7ae3115..8710213 100644
--- a/drivers/net/nfp/meson.build
+++ b/drivers/net/nfp/meson.build
@@ -7,6 +7,7 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
endif
sources = files(
'flower/nfp_flower.c',
+ 'flower/nfp_flower_ctrl.c',
'nfpcore/nfp_cpp_pcie_ops.c',
'nfpcore/nfp_nsp.c',
'nfpcore/nfp_cppcore.c',
--
1.8.3.1
next prev parent reply other threads:[~2022-08-05 6:34 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-08-05 6:32 [PATCH v5 00/12] preparation for the rte_flow offload of nfp PMD Chaoyong He
2022-08-05 6:32 ` [PATCH v5 01/12] net/nfp: move app specific attributes to own struct Chaoyong He
2022-08-05 10:49 ` Andrew Rybchenko
2022-08-05 6:32 ` [PATCH v5 02/12] net/nfp: simplify initialization and remove dead code Chaoyong He
2022-08-05 6:32 ` [PATCH v5 03/12] net/nfp: move app specific init logic to own function Chaoyong He
2022-08-05 10:53 ` Andrew Rybchenko
2022-08-05 6:32 ` [PATCH v5 04/12] net/nfp: add initial flower firmware support Chaoyong He
2022-08-05 11:00 ` Andrew Rybchenko
2022-08-05 6:32 ` [PATCH v5 05/12] net/nfp: add flower PF setup and mempool init logic Chaoyong He
2022-08-05 12:49 ` Andrew Rybchenko
2022-08-05 6:32 ` [PATCH v5 06/12] net/nfp: add flower PF related routines Chaoyong He
2022-08-05 12:55 ` Andrew Rybchenko
2022-08-05 6:32 ` [PATCH v5 07/12] net/nfp: add flower ctrl VNIC related logics Chaoyong He
2022-08-05 13:05 ` Andrew Rybchenko
2022-08-08 11:32 ` Chaoyong He
2022-08-08 14:45 ` Stephen Hemminger
2022-08-10 1:51 ` Chaoyong He
2022-08-10 19:39 ` Stephen Hemminger
2022-08-11 1:26 ` Chaoyong He
2022-08-11 4:24 ` Stephen Hemminger
2022-08-11 6:31 ` Chaoyong He
2022-08-11 15:07 ` Stephen Hemminger
2022-08-05 6:32 ` [PATCH v5 08/12] net/nfp: move common rxtx function for flower use Chaoyong He
2022-08-05 6:32 ` Chaoyong He [this message]
2022-08-05 6:32 ` [PATCH v5 10/12] net/nfp: add flower representor framework Chaoyong He
2022-08-05 14:23 ` Andrew Rybchenko
2022-08-08 11:56 ` Chaoyong He
2022-08-05 6:32 ` [PATCH v5 11/12] net/nfp: move rxtx function to header file Chaoyong He
2022-08-05 6:32 ` [PATCH v5 12/12] net/nfp: add flower PF rxtx logic Chaoyong He
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1659681155-16525-10-git-send-email-chaoyong.he@corigine.com \
--to=chaoyong.he@corigine.com \
--cc=dev@dpdk.org \
--cc=heinrich.kuhn@corigine.com \
--cc=niklas.soderlund@corigine.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).