DPDK patches and discussions
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
	"Kiran Kumar K" <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [PATCH v2 1/2] net/cnxk: optimize Rx pktsize extraction
Date: Thu, 24 Feb 2022 21:40:11 +0530	[thread overview]
Message-ID: <20220224161013.4566-1-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20220224135243.4233-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

In vWQE mode, the mbuf address is calculated without using the
iova list.
The packet length can also be derived from NIX_PARSE_S, which
lets us avoid reading the second cache line entirely, depending
on the offloads enabled.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 v2 Changes:
 - Reword commit message.

 drivers/net/cnxk/cn10k_rx.h | 75 +++++++++++++++++++++++++++----------
 1 file changed, 55 insertions(+), 20 deletions(-)
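
A minimal scalar sketch (not part of the patch) of the extraction the
vector code below performs. It assumes only what the diff itself shows:
NIX_PARSE_S word2 sits at byte offset 16 of the CQE and its low 16 bits
hold the packet length minus one. The helper name is hypothetical.

#include <stdint.h>

/* Return the packet size encoded in NIX_PARSE_S word2 of a CQE. */
static inline uint16_t
parse_s_pkt_size(const void *cqe)
{
	/* Word2 of the parse header, byte offset 16 from the CQE start. */
	const uint64_t w2 = *(const uint64_t *)((const uint8_t *)cqe + 16);

	/* The low 16 bits carry "length - 1", hence the + 1. */
	return (uint16_t)((w2 & 0xFFFF) + 1);
}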

diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index abf280102b..65a08e379b 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -590,7 +590,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 							*(uint64_t *)args :
 							rxq->mbuf_initializer;
 	const uint64x2_t data_off = flags & NIX_RX_VWQE_F ?
-						  vdupq_n_u64(0x80ULL) :
+					    vdupq_n_u64(RTE_PKTMBUF_HEADROOM) :
 						  vdupq_n_u64(rxq->data_off);
 	const uint32_t qmask = flags & NIX_RX_VWQE_F ? 0 : rxq->qmask;
 	const uint64_t wdata = flags & NIX_RX_VWQE_F ? 0 : rxq->wdata;
@@ -687,6 +687,12 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		cq3_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 3, 64, flags));

 		if (!(flags & NIX_RX_VWQE_F)) {
+			/* Get NIX_RX_SG_S for size and buffer pointer */
+			cq0_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 0, 64, flags));
+			cq1_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 1, 64, flags));
+			cq2_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 2, 64, flags));
+			cq3_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 3, 64, flags));
+
 			/* Extract mbuf from NIX_RX_SG_S */
 			mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
 			mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
@@ -705,21 +711,24 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
 		mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);

-		/* Mask to get packet len from NIX_RX_SG_S */
-		const uint8x16_t shuf_msk = {
-			0xFF, 0xFF, /* pkt_type set as unknown */
-			0xFF, 0xFF, /* pkt_type set as unknown */
-			0,    1,    /* octet 1~0, low 16 bits pkt_len */
-			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
-			0,    1,    /* octet 1~0, 16 bits data_len */
-			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
-
-		/* Form the rx_descriptor_fields1 with pkt_len and data_len */
-		f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
-		f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
-		f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
-		f3 = vqtbl1q_u8(cq3_w8, shuf_msk);
-
+		if (!(flags & NIX_RX_VWQE_F)) {
+			/* Mask to get packet len from NIX_RX_SG_S */
+			const uint8x16_t shuf_msk = {
+				0xFF, 0xFF, /* pkt_type set as unknown */
+				0xFF, 0xFF, /* pkt_type set as unknown */
+				0,    1,    /* octet 1~0, low 16 bits pkt_len */
+				0xFF, 0xFF, /* skip high 16 bits pkt_len, zero
+					       out */
+				0,    1,    /* octet 1~0, 16 bits data_len */
+				0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+			/* Form the rx_descriptor_fields1 with pkt_len and
+			 * data_len */
+			f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
+			f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
+			f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
+			f3 = vqtbl1q_u8(cq3_w8, shuf_msk);
+		}
 		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
 			/* Prefetch probable CPT parse header area */
 			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf0, d_off));
@@ -731,12 +740,42 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		/* Load CQE word0 and word 1 */
 		const uint64_t cq0_w0 = *CQE_PTR_OFF(cq0, 0, 0, flags);
 		const uint64_t cq0_w1 = *CQE_PTR_OFF(cq0, 0, 8, flags);
+		const uint64_t cq0_w2 = *CQE_PTR_OFF(cq0, 0, 16, flags);
 		const uint64_t cq1_w0 = *CQE_PTR_OFF(cq0, 1, 0, flags);
 		const uint64_t cq1_w1 = *CQE_PTR_OFF(cq0, 1, 8, flags);
+		const uint64_t cq1_w2 = *CQE_PTR_OFF(cq0, 1, 16, flags);
 		const uint64_t cq2_w0 = *CQE_PTR_OFF(cq0, 2, 0, flags);
 		const uint64_t cq2_w1 = *CQE_PTR_OFF(cq0, 2, 8, flags);
+		const uint64_t cq2_w2 = *CQE_PTR_OFF(cq0, 2, 16, flags);
 		const uint64_t cq3_w0 = *CQE_PTR_OFF(cq0, 3, 0, flags);
 		const uint64_t cq3_w1 = *CQE_PTR_OFF(cq0, 3, 8, flags);
+		const uint64_t cq3_w2 = *CQE_PTR_OFF(cq0, 3, 16, flags);
+
+		if (flags & NIX_RX_VWQE_F) {
+			uint16_t psize0, psize1, psize2, psize3;
+
+			psize0 = (cq0_w2 & 0xFFFF) + 1;
+			psize1 = (cq1_w2 & 0xFFFF) + 1;
+			psize2 = (cq2_w2 & 0xFFFF) + 1;
+			psize3 = (cq3_w2 & 0xFFFF) + 1;
+
+			f0 = vdupq_n_u64(0);
+			f1 = vdupq_n_u64(0);
+			f2 = vdupq_n_u64(0);
+			f3 = vdupq_n_u64(0);
+
+			f0 = vsetq_lane_u16(psize0, f0, 2);
+			f0 = vsetq_lane_u16(psize0, f0, 4);
+
+			f1 = vsetq_lane_u16(psize1, f1, 2);
+			f1 = vsetq_lane_u16(psize1, f1, 4);
+
+			f2 = vsetq_lane_u16(psize2, f2, 2);
+			f2 = vsetq_lane_u16(psize2, f2, 4);
+
+			f3 = vsetq_lane_u16(psize3, f3, 2);
+			f3 = vsetq_lane_u16(psize3, f3, 4);
+		}

 		if (flags & NIX_RX_OFFLOAD_RSS_F) {
 			/* Fill rss in the rx_descriptor_fields1 */
@@ -805,10 +844,6 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		}

 		if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
-			uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
-			uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
-			uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
-			uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);

 			ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
 			ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
--
2.17.1
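
For reference, a standalone sketch (not from the driver) of the lane
placement used in the vWQE branch above: within the 16-byte
rx_descriptor_fields1 word, the low 16 bits of pkt_len sit at 16-bit
lane 2 and data_len at lane 4, which matches the shuf_msk comments in
the diff. The helper name and the explicit reinterprets are mine; the
in-tree code operates on its existing vector variables.

#include <arm_neon.h>
#include <stdint.h>

/* Build rx_descriptor_fields1 with pkt_len and data_len set to psize. */
static inline uint64x2_t
fields1_from_pkt_size(uint16_t psize)
{
	uint16x8_t f = vdupq_n_u16(0);

	f = vsetq_lane_u16(psize, f, 2); /* pkt_len low 16 bits (bytes 4-5) */
	f = vsetq_lane_u16(psize, f, 4); /* data_len (bytes 8-9) */

	return vreinterpretq_u64_u16(f);
}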


Thread overview: 7+ messages
2022-02-24 13:52 [PATCH " pbhagavatula
2022-02-24 13:52 ` [PATCH 2/2] event/cnxk: align perfetchs to CN10K cache model pbhagavatula
2022-02-24 16:10 ` pbhagavatula [this message]
2022-02-24 16:10   ` [PATCH v2 2/2] net/cnxk: " pbhagavatula
2022-02-24 18:40   ` [dpdk-dev] [PATCH v3 1/2] net/cnxk: optimize Rx packet size extraction jerinj
2022-02-24 18:40     ` [dpdk-dev] [PATCH v3 2/2] net/cnxk: align perfetchs to CN10K cache model jerinj
2022-02-24 20:39     ` [dpdk-dev] [PATCH v3 1/2] net/cnxk: optimize Rx packet size extraction Jerin Jacob
