DPDK patches and discussions
 help / color / mirror / Atom feed
From: beilei.xing@intel.com
To: jingjing.wu@intel.com, mingxia.liu@intel.com
Cc: dev@dpdk.org, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH 16/19] net/cpfl: support representor data path
Date: Wed,  9 Aug 2023 15:51:31 +0000	[thread overview]
Message-ID: <20230809155134.539287-17-beilei.xing@intel.com> (raw)
In-Reply-To: <20230809155134.539287-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Add Rx/Tx burst for port representor.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_representor.c |  83 +++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.c        | 121 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h        |   4 +
 3 files changed, 208 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 79cb7f76d4..51b70ea346 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -491,6 +491,87 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.stats_reset		= idpf_repr_stats_reset,
 };
 
+#define MAX_IDPF_REPRESENTOR_BURST  128
+
+/* Receive burst for a port representor.
+ *
+ * Mbufs are drained from the representor's software rx_ring (filled by
+ * the dispatch path).  When the vport and the representor queue use
+ * different mempools, the mbuf is copied into the representor's pool.
+ *
+ * Returns the number of valid mbufs written to @rx_pkts; packets whose
+ * copy fails (mempool exhausted) are dropped, never returned as NULL.
+ */
+static uint16_t
+cpfl_repr_rx_burst(void *rxq,
+		   struct rte_mbuf **rx_pkts,
+		   uint16_t nb_pkts)
+{
+	struct cpfl_repr_rx_queue *rx_queue = rxq;
+	struct rte_ring *ring = rx_queue->rx_ring;
+	struct rte_mbuf *mbuf[MAX_IDPF_REPRESENTOR_BURST] = {NULL};
+	unsigned int nb_recv;
+	uint16_t nb_rx = 0;
+	uint16_t i;
+
+	if (unlikely(!ring))
+		return 0;
+
+	nb_recv = rte_ring_dequeue_burst(ring, (void **)mbuf,
+					 RTE_MIN(nb_pkts, MAX_IDPF_REPRESENTOR_BURST), NULL);
+	for (i = 0; i < nb_recv; i++) {
+		if (mbuf[i]->pool != rx_queue->mb_pool) {
+			/* need copy if mpools used for vport and representor queue are different */
+			rx_pkts[nb_rx] = rte_pktmbuf_copy(mbuf[i], rx_queue->mb_pool, 0, UINT32_MAX);
+			rte_pktmbuf_free(mbuf[i]);
+			/* drop the packet if the copy failed instead of
+			 * handing a NULL mbuf to the application
+			 */
+			if (unlikely(!rx_pkts[nb_rx]))
+				continue;
+			nb_rx++;
+		} else {
+			rx_pkts[nb_rx++] = mbuf[i];
+		}
+	}
+
+	__atomic_fetch_add(&rx_queue->stats.packets, nb_rx, __ATOMIC_RELAXED);
+	/* TODO: bytes stats */
+	return nb_rx;
+}
+
+/* VF representor: the switch target is the VF's own VSI, taken from the
+ * vport info previously fetched from the control plane.
+ */
+static uint16_t
+cpfl_get_vsi_from_vf_representor(struct cpfl_repr *repr)
+{
+	uint16_t vsi_id = repr->vport_info->vport_info.vsi_id;
+
+	return vsi_id;
+}
+
+/* Transmit burst for a port representor.
+ *
+ * Packets are sent on the underlying hardware Tx queue tagged with the
+ * target VSI id so the switch delivers them to the represented function.
+ * Only VF representors are supported; PF representors log an error and
+ * send nothing.  Returns the number of packets actually queued.
+ */
+static uint16_t
+cpfl_repr_tx_burst(void *txq,
+		   struct rte_mbuf **tx_pkts,
+		   uint16_t nb_pkts)
+{
+	struct cpfl_repr_tx_queue *tx_queue = txq;
+	struct idpf_tx_queue *hw_txq;
+	struct cpfl_repr *repr;
+	uint16_t vsi_id;
+	uint16_t nb;
+
+	/* check tx_queue->txq before dereferencing it for hw_txq */
+	if (unlikely(!tx_queue->txq))
+		return 0;
+
+	hw_txq = &tx_queue->txq->base;
+	repr = tx_queue->repr;
+
+	if (!hw_txq) {
+		PMD_INIT_LOG(ERR, "No Queue associated with representor host_id: %d, %s %d",
+			     repr->repr_id.host_id,
+			     (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) ? "vf" : "pf",
+			     (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) ? repr->repr_id.vf_id :
+			     repr->repr_id.pf_id);
+		return 0;
+	}
+
+	if (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) {
+		vsi_id = cpfl_get_vsi_from_vf_representor(repr);
+	} else {
+		/* TODO: RTE_ETH_REPRESENTOR_PF */
+		PMD_INIT_LOG(ERR, "Get vsi from pf representor is not supported.");
+		return 0;
+	}
+
+	/* the hardware Tx queue is shared between representors, serialize access */
+	rte_spinlock_lock(&tx_queue->txq->lock);
+	nb = cpfl_xmit_pkts_to_vsi(tx_queue->txq, tx_pkts, nb_pkts, vsi_id);
+	rte_spinlock_unlock(&tx_queue->txq->lock);
+
+	__atomic_fetch_add(&tx_queue->stats.packets, nb, __ATOMIC_RELAXED);
+	/* only the packets that could NOT be queued count as errors */
+	__atomic_fetch_add(&tx_queue->stats.errors, nb_pkts - nb, __ATOMIC_RELAXED);
+	/* TODO: bytes stats */
+	return nb;
+}
+
 static int
 cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 {
@@ -507,6 +588,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 		repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+	eth_dev->rx_pkt_burst = cpfl_repr_rx_burst;
+	eth_dev->tx_pkt_burst = cpfl_repr_tx_burst;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
 	/* bit[15:14] type
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index df6a8c1940..882efe04cf 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -616,6 +616,9 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	txq->ops = &def_txq_ops;
 	cpfl_vport->nb_data_txq++;
 	txq->q_set = true;
+
+	rte_spinlock_init(&cpfl_txq->lock);
+
 	dev->data->tx_queues[queue_idx] = cpfl_txq;
 
 	return 0;
@@ -1409,6 +1412,124 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
+/* Fill a Tx context descriptor that steers the packet either to a
+ * specific target VSI (is_vsi == true) or to the uplink port.
+ * The TX Native TSO context descriptor layout is reused to carry the
+ * VSI id, which is why TSO cannot be supported on this path.
+ */
+static inline void
+cpfl_set_tx_switch_ctx(uint16_t vsi_id, bool is_vsi,
+		       volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+	uint16_t cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX;
+
+	if (is_vsi) {
+		cmd_dtype |= IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI;
+		/* target VSI rides in the MSS field of the TSO descriptor */
+		ctx_desc->tso.qw0.mss_rt =
+			rte_cpu_to_le_16(vsi_id & IDPF_TXD_FLEX_CTX_MSS_RT_M);
+	} else {
+		cmd_dtype |= IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK;
+	}
+
+	ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+}
+
+/* Transmit pkts to destination VSI,
+ * much similar as idpf_splitq_xmit_pkts
+ *
+ * Each packet is preceded by one context descriptor carrying the target
+ * VSI id, so a packet with N segments consumes N + 1 ring descriptors.
+ * The caller is expected to serialize access to the queue
+ * (cpfl_repr_tx_burst takes cpfl_txq->lock around this call).
+ * Returns the number of packets actually queued to the ring.
+ */
+uint16_t
+cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *cpfl_txq, struct rte_mbuf **tx_pkts,
+		      uint16_t nb_pkts, uint16_t vsi_id)
+{
+	volatile struct idpf_flex_tx_sched_desc *txr;
+	volatile struct idpf_flex_tx_sched_desc *txd;
+	volatile union idpf_flex_tx_ctx_desc *ctx_desc;
+	struct idpf_tx_entry *sw_ring;
+	struct idpf_tx_entry *txe, *txn;
+	uint16_t nb_used, tx_id, sw_id;
+	struct idpf_tx_queue *txq;
+	struct rte_mbuf *tx_pkt;
+	uint16_t nb_to_clean;
+	uint16_t nb_tx = 0;
+
+	if (unlikely(!cpfl_txq))
+		return nb_tx;
+
+	txq = &cpfl_txq->base;
+	if (unlikely(!txq) || unlikely(!txq->q_started))
+		return nb_tx;
+
+	txr = txq->desc_ring;
+	sw_ring = txq->sw_ring;
+	tx_id = txq->tx_tail;
+	sw_id = txq->sw_tail;
+	txe = &sw_ring[sw_id];
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = tx_pkts[nb_tx];
+
+		/* running low on descriptors: reclaim completed ones first */
+		if (txq->nb_free <= txq->free_thresh) {
+			/* TODO: Need to refine, refer to idpf_splitq_xmit_pkts */
+			nb_to_clean = 2 * txq->rs_thresh;
+			while (nb_to_clean--)
+				idpf_split_tx_free(txq->complq);
+		}
+
+		/* +1 accounts for the context descriptor below */
+		if (txq->nb_free < tx_pkt->nb_segs + 1)
+			break;
+		/* need context desc carry target vsi, no TSO support. */
+		nb_used = tx_pkt->nb_segs + 1;
+
+		/* context descriptor prepare */
+		ctx_desc = (volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];
+
+		cpfl_set_tx_switch_ctx(vsi_id, true, ctx_desc);
+		tx_id++;
+		if (tx_id == txq->nb_tx_desc)
+			tx_id = 0;
+
+		/* one data descriptor per mbuf segment */
+		do {
+			txd = &txr[tx_id];
+			txn = &sw_ring[txe->next_id];
+			txe->mbuf = tx_pkt;
+
+			/* Setup TX descriptor */
+			txd->buf_addr =
+				rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
+			txd->qw1.cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
+			txd->qw1.rxr_bufsize = tx_pkt->data_len;
+			/* completion tag identifies the sw_ring slot to recycle */
+			txd->qw1.compl_tag = sw_id;
+			tx_id++;
+			if (tx_id == txq->nb_tx_desc)
+				tx_id = 0;
+			sw_id = txe->next_id;
+			txe = txn;
+			tx_pkt = tx_pkt->next;
+		} while (tx_pkt);
+
+		/* fill the last descriptor with End of Packet (EOP) bit */
+		txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;
+
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+
+		/* request a completion report (RE) every 32 used descriptors
+		 * so descriptors can eventually be reclaimed
+		 */
+		if (txq->nb_used >= 32) {
+			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+			/* Update txq RE bit counters */
+			txq->nb_used = 0;
+		}
+	}
+
+	/* update the tail pointer if any packets were processed */
+	if (likely(nb_tx)) {
+		IDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+		txq->tx_tail = tx_id;
+		txq->sw_tail = sw_id;
+	}
+	return nb_tx;
+}
+
 uint16_t
 cpfl_dummy_recv_pkts(__rte_unused void *queue,
 		     __rte_unused struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 914a0485b5..463ab73323 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -72,6 +72,7 @@ struct cpfl_txq_hairpin_info {
 struct cpfl_tx_queue {
 	struct idpf_tx_queue base;
 	struct cpfl_txq_hairpin_info hairpin_info;
+	rte_spinlock_t lock;
 };
 
 static inline uint16_t
@@ -124,4 +125,7 @@ uint16_t cpfl_dummy_recv_pkts(void *queue,
 uint16_t cpfl_dummy_xmit_pkts(void *queue,
 			      struct rte_mbuf **tx_pkts,
 			      uint16_t nb_pkts);
+uint16_t cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *txq,
+			       struct rte_mbuf **tx_pkts,
+			       uint16_t nb_pkts, uint16_t vsi_id);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.34.1


  parent reply	other threads:[~2023-08-09  7:35 UTC|newest]

Thread overview: 89+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
2023-08-09 15:51 ` [PATCH 01/19] net/cpfl: refine devargs parse and process beilei.xing
2023-08-09 15:51 ` [PATCH 02/19] net/cpfl: introduce interface structure beilei.xing
2023-08-09 15:51 ` [PATCH 03/19] net/cpfl: add cp channel beilei.xing
2023-08-09 15:51 ` [PATCH 04/19] net/cpfl: enable vport mapping beilei.xing
2023-08-09 15:51 ` [PATCH 05/19] net/cpfl: parse representor devargs beilei.xing
2023-08-09 15:51 ` [PATCH 06/19] net/cpfl: support probe again beilei.xing
2023-08-09 15:51 ` [PATCH 07/19] net/cpfl: create port representor beilei.xing
2023-08-09 15:51 ` [PATCH 08/19] net/cpfl: support vport list/info get beilei.xing
2023-08-09 15:51 ` [PATCH 09/19] net/cpfl: update vport info before creating representor beilei.xing
2023-08-09 15:51 ` [PATCH 10/19] net/cpfl: refine handle virtual channel message beilei.xing
2023-08-09 15:51 ` [PATCH 11/19] net/cpfl: add exceptional vport beilei.xing
2023-08-09 15:51 ` [PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup beilei.xing
2023-08-09 15:51 ` [PATCH 13/19] net/cpfl: support link update for representor beilei.xing
2023-08-09 15:51 ` [PATCH 14/19] net/cpfl: add stats ops " beilei.xing
2023-08-09 15:51 ` [PATCH 15/19] common/idpf: refine inline function beilei.xing
2023-08-09 15:51 ` beilei.xing [this message]
2023-08-09 15:51 ` [PATCH 17/19] net/cpfl: support dispatch process beilei.xing
2023-08-09 15:51 ` [PATCH 18/19] net/cpfl: add dispatch service beilei.xing
2023-08-09 15:51 ` [PATCH 19/19] doc: update release notes for representor beilei.xing
2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
2023-08-16 15:05   ` [PATCH v2 01/12] net/cpfl: refine devargs parse and process beilei.xing
2023-08-16 15:05   ` [PATCH v2 02/12] net/cpfl: introduce interface structure beilei.xing
2023-08-16 15:05   ` [PATCH v2 03/12] net/cpfl: add cp channel beilei.xing
2023-08-16 15:05   ` [PATCH v2 04/12] net/cpfl: enable vport mapping beilei.xing
2023-08-16 15:05   ` [PATCH v2 05/12] net/cpfl: parse representor devargs beilei.xing
2023-08-16 15:05   ` [PATCH v2 06/12] net/cpfl: support probe again beilei.xing
2023-08-16 15:05   ` [PATCH v2 07/12] net/cpfl: create port representor beilei.xing
2023-09-05  7:35     ` Liu, Mingxia
2023-09-05  8:30     ` Liu, Mingxia
2023-08-16 15:05   ` [PATCH v2 08/12] net/cpfl: support vport list/info get beilei.xing
2023-08-16 15:05   ` [PATCH v2 09/12] net/cpfl: update vport info before creating representor beilei.xing
2023-09-06  2:33     ` Liu, Mingxia
2023-08-16 15:05   ` [PATCH v2 10/12] net/cpfl: refine handle virtual channel message beilei.xing
2023-08-16 15:05   ` [PATCH v2 11/12] net/cpfl: support link update for representor beilei.xing
2023-08-16 15:05   ` [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup " beilei.xing
2023-09-06  3:02     ` Liu, Mingxia
2023-09-07 15:15   ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
2023-09-07 15:15     ` [PATCH v3 01/11] net/cpfl: refine devargs parse and process beilei.xing
2023-09-07 15:15     ` [PATCH v3 02/11] net/cpfl: introduce interface structure beilei.xing
2023-09-07 15:15     ` [PATCH v3 03/11] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-07 15:15     ` [PATCH v3 04/11] net/cpfl: introduce CP channel API beilei.xing
2023-09-07 15:16     ` [PATCH v3 05/11] net/cpfl: enable vport mapping beilei.xing
2023-09-07 15:16     ` [PATCH v3 06/11] net/cpfl: parse representor devargs beilei.xing
2023-09-07 15:16     ` [PATCH v3 07/11] net/cpfl: support probe again beilei.xing
2023-09-07 15:16     ` [PATCH v3 08/11] net/cpfl: create port representor beilei.xing
2023-09-07 15:16     ` [PATCH v3 09/11] net/cpfl: support vport list/info get beilei.xing
2023-09-07 15:16     ` [PATCH v3 10/11] net/cpfl: update vport info before creating representor beilei.xing
2023-09-07 15:16     ` [PATCH v3 11/11] net/cpfl: support link update for representor beilei.xing
2023-09-08 11:16     ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
2023-09-08 11:16       ` [PATCH v4 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-08 11:16       ` [PATCH v4 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-09  2:08         ` Wu, Jingjing
2023-09-08 11:16       ` [PATCH v4 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-09  2:13         ` Wu, Jingjing
2023-09-08 11:16       ` [PATCH v4 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-08 11:16       ` [PATCH v4 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-08 11:16       ` [PATCH v4 06/10] net/cpfl: parse representor devargs beilei.xing
2023-09-08 11:16       ` [PATCH v4 07/10] net/cpfl: support probe again beilei.xing
2023-09-08 11:16       ` [PATCH v4 08/10] net/cpfl: support vport list/info get beilei.xing
2023-09-09  2:34         ` Wu, Jingjing
2023-09-08 11:17       ` [PATCH v4 09/10] net/cpfl: create port representor beilei.xing
2023-09-09  3:04         ` Wu, Jingjing
2023-09-08 11:17       ` [PATCH v4 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-09  3:05         ` Wu, Jingjing
2023-09-12 16:26       ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
2023-09-12 16:26         ` [PATCH v5 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-12 16:26         ` [PATCH v5 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-12 16:26         ` [PATCH v5 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-12 16:26         ` [PATCH v5 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-12 16:26         ` [PATCH v5 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-12 16:26         ` [PATCH v5 06/10] net/cpfl: support vport list/info get beilei.xing
2023-09-12 16:26         ` [PATCH v5 07/10] net/cpfl: parse representor devargs beilei.xing
2023-09-12 16:26         ` [PATCH v5 08/10] net/cpfl: support probe again beilei.xing
2023-09-12 16:26         ` [PATCH v5 09/10] net/cpfl: create port representor beilei.xing
2023-09-12 16:26         ` [PATCH v5 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-12 17:30         ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
2023-09-12 17:30           ` [PATCH v6 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-12 17:30           ` [PATCH v6 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-12 17:30           ` [PATCH v6 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-12 17:30           ` [PATCH v6 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-12 17:30           ` [PATCH v6 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-12 17:30           ` [PATCH v6 06/10] net/cpfl: support vport list/info get beilei.xing
2023-09-12 17:30           ` [PATCH v6 07/10] net/cpfl: parse representor devargs beilei.xing
2023-09-12 17:30           ` [PATCH v6 08/10] net/cpfl: support probe again beilei.xing
2023-09-12 17:30           ` [PATCH v6 09/10] net/cpfl: create port representor beilei.xing
2023-09-12 17:30           ` [PATCH v6 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-13  1:01           ` [PATCH v6 00/10] net/cpfl: support port representor Wu, Jingjing
2023-09-13  5:41             ` Zhang, Qi Z

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230809155134.539287-17-beilei.xing@intel.com \
    --to=beilei.xing@intel.com \
    --cc=dev@dpdk.org \
    --cc=jingjing.wu@intel.com \
    --cc=mingxia.liu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).