DPDK patches and discussions
* [PATCH] net/cnxk: fix separate callback for Rx flush on CN10k
From: Rahul Bhansali @ 2023-10-16  5:29 UTC
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Jerin Jacob
  Cc: Rahul Bhansali

On device stop, the Rx packet flush routine is called; it uses LMT
lines to bulk-free the pending meta buffers. LMT lines, however, are
not valid on non-EAL cores. As a fix, add a separate callback for the
Rx packet flush that frees each meta packet individually through the
NPA aura free API.
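
For reference, a minimal sketch of the per-packet free path
(illustrative only: flush_free_meta is a hypothetical helper, while
roc_npa_aura_op_free and its (pool_id, drop, iova) argument order are
taken from the diff below):

    /* Hypothetical helper, not part of this patch: return meta mbufs
     * to their NPA aura one at a time. Safe on non-EAL cores because
     * it does not rely on a per-core LMT line.
     */
    static void
    flush_free_meta(struct rte_mbuf **meta, uint16_t n)
    {
            uint16_t i;

            for (i = 0; i < n; i++)
                    /* drop flag 0: plain free of a single pointer */
                    roc_npa_aura_op_free(meta[i]->pool->pool_id, 0,
                                         (uint64_t)meta[i]);
    }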

Fixes: 4382a7ccf781 ("net/cnxk: support Rx security offload on cn10k")

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn10k_rx.h        | 93 ++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn10k_rx_select.c | 10 +++-
 2 files changed, 101 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index f5e935d383..7bb4c86d75 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -1098,6 +1098,99 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	return nb_pkts;
 }
 
+static __rte_always_inline uint16_t
+cn10k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
+			  const uint16_t flags)
+{
+	struct cn10k_eth_rxq *rxq = rx_queue;
+	const uint64_t mbuf_init = rxq->mbuf_initializer;
+	const void *lookup_mem = rxq->lookup_mem;
+	const uint64_t data_off = rxq->data_off;
+	struct rte_mempool *meta_pool = NULL;
+	const uint64_t wdata = rxq->wdata;
+	const uint32_t qmask = rxq->qmask;
+	const uintptr_t desc = rxq->desc;
+	uint64_t lbase = rxq->lmt_base;
+	uint16_t packets = 0, nb_pkts;
+	uint16_t lmt_id __rte_unused;
+	uint32_t head = rxq->head;
+	struct nix_cqe_hdr_s *cq;
+	struct rte_mbuf *mbuf;
+	uint64_t sa_base = 0;
+	uintptr_t cpth = 0;
+	uint8_t loff = 0;
+	uint64_t laddr;
+
+	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
+
+	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+		sa_base = rxq->sa_base;
+		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+		laddr = lbase;
+		laddr += 8;
+		if (flags & NIX_RX_REAS_F)
+			meta_pool = (struct rte_mempool *)rxq->meta_pool;
+	}
+
+	while (packets < nb_pkts) {
+		/* Prefetch N desc ahead */
+		rte_prefetch_non_temporal((void *)(desc + (CQE_SZ((head + 2) & qmask))));
+		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));
+
+		mbuf = nix_get_mbuf_from_cqe(cq, data_off);
+
+		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
+
+		/* Translate meta to mbuf */
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			const uint64_t cq_w1 = *((const uint64_t *)cq + 1);
+			const uint64_t cq_w5 = *((const uint64_t *)cq + 5);
+			struct rte_mbuf *meta_buf = mbuf;
+
+			cpth = ((uintptr_t)meta_buf + (uint16_t)data_off);
+
+			/* Update mempool pointer for full mode pkt */
+			if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) &&
+			    !((*(uint64_t *)cpth) & BIT(15)))
+				meta_buf->pool = meta_pool;
+
+			mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr, &loff,
+						       meta_buf, data_off, flags, mbuf_init);
+			/* Free meta mbuf directly, without an LMT line, as this
+			 * is called from a non-datapath context (dev_stop).
+			 */
+			if (loff) {
+				roc_npa_aura_op_free(meta_buf->pool->pool_id, 0,
+						     (uint64_t)meta_buf);
+				loff = 0;
+			}
+		}
+
+		cn10k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
+				      cpth, sa_base, flags);
+		cn10k_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
+					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
+					(uint64_t *)((uint8_t *)mbuf + data_off));
+		rx_pkts[packets++] = mbuf;
+		roc_prefetch_store_keep(mbuf);
+		head++;
+		head &= qmask;
+	}
+
+	rxq->head = head;
+	rxq->available -= nb_pkts;
+
+	/* Free all the CQs that we've processed */
+	plt_write64((wdata | nb_pkts), rxq->cq_door);
+
+	if (flags & NIX_RX_OFFLOAD_SECURITY_F)
+		rte_io_wmb();
+
+	return nb_pkts;
+}
+
 #if defined(RTE_ARCH_ARM64)
 
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cn10k_rx_select.c b/drivers/net/cnxk/cn10k_rx_select.c
index 1d44f2924e..6a5c34287e 100644
--- a/drivers/net/cnxk/cn10k_rx_select.c
+++ b/drivers/net/cnxk/cn10k_rx_select.c
@@ -22,6 +22,13 @@ pick_rx_func(struct rte_eth_dev *eth_dev,
 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 }
 
+static uint16_t __rte_noinline __rte_hot __rte_unused
+cn10k_nix_flush_rx(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)
+{
+	const uint16_t flags = NIX_RX_MULTI_SEG_F | NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F;
+	return cn10k_nix_flush_recv_pkts(rx_queue, rx_pkts, pkts, flags);
+}
+
 void
 cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 {
@@ -82,8 +89,7 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 
 	/* Copy multi seg version with security for tear down sequence */
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		dev->rx_pkt_burst_no_offload =
-			nix_eth_rx_burst_mseg_reas[NIX_RX_OFFLOAD_SECURITY_F];
+		dev->rx_pkt_burst_no_offload = cn10k_nix_flush_rx;
 
 	if (dev->scalar_ena) {
 		if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
-- 
2.25.1
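
For context beyond the diff: the rx_pkt_burst_no_offload hook saved
above is meant to be invoked from the driver's teardown sequence to
drain pending completions. A hypothetical sketch of such a drain loop
(only rx_pkt_burst_no_offload comes from this patch; the function name
and burst size are illustrative):

    /* Hypothetical teardown-time drain: call the flush burst until
     * the CQ is empty and free whatever it returns.
     */
    static void
    drain_rxq_on_stop(struct cnxk_eth_dev *dev, void *rxq)
    {
            struct rte_mbuf *pkts[32];
            uint16_t n, i;

            do {
                    n = dev->rx_pkt_burst_no_offload(rxq, pkts, 32);
                    for (i = 0; i < n; i++)
                            rte_pktmbuf_free(pkts[i]);
            } while (n > 0);
    }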



* Re: [PATCH] net/cnxk: fix separate callback for Rx flush on CN10k
From: Jerin Jacob @ 2023-10-18  5:11 UTC
  To: Rahul Bhansali
  Cc: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Jerin Jacob

On Mon, Oct 16, 2023 at 1:26 PM Rahul Bhansali <rbhansali@marvell.com> wrote:
>
> On device stop, the Rx packet flush routine is called; it uses LMT
> lines to bulk-free the pending meta buffers. LMT lines, however, are
> not valid on non-EAL cores. As a fix, add a separate callback for the
> Rx packet flush that frees each meta packet individually through the
> NPA aura free API.
>
> Fixes: 4382a7ccf781 ("net/cnxk: support Rx security offload on cn10k")
>
> Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>


Applied to dpdk-next-net-mrvl/for-next-net. Thanks

