DPDK patches and discussions
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
	"Kiran Kumar K" <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [dpdk-dev] [PATCH v3 02/13] net/cnxk: enable ptp processing in vector Rx
Date: Mon, 21 Jun 2021 01:58:55 +0530
Message-ID: <20210620202906.10974-2-pbhagavatula@marvell.com>
In-Reply-To: <20210620202906.10974-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Enable PTP offload in the vector Rx burst function. Mbufs are
processed on the vector path, and only the timestamp extraction at
the end falls back to scalar code.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
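Note for reviewers (not part of the commit): the NEON blocks added below
load the CGX-inserted timestamp, store it into the mbuf dynfield and raise
the PTP ol_flags without leaving the vector loop. A rough scalar sketch of
the per-mbuf handling, assuming the cnxk driver headers are available and
using a made-up helper name:

/* Illustrative sketch only -- not part of this patch. */
static __rte_always_inline void
rx_tstamp_scalar_sketch(struct rte_mbuf *mbuf,
			struct cnxk_timesync_info *tstamp,
			const uint64_t *raw_be_tstamp)
{
	/* The raw timestamp sits big-endian at the start of packet data. */
	uint64_t ts = rte_be_to_cpu_64(*raw_be_tstamp);

	/* The vector code has already trimmed CNXK_NIX_TIMESYNC_RX_OFFSET
	 * from pkt_len/data_len via the len_off subtraction.
	 */
	*cnxk_nix_timestamp_dynfield(mbuf, tstamp) = ts;

	if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST |
				  tstamp->rx_tstamp_dynflag;
		/* Cache the newest PTP timestamp for the timesync read op. */
		tstamp->rx_ready = 1;
		tstamp->rx_tstamp = ts;
	}
}
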
 drivers/net/cnxk/cn10k_ethdev.c |   1 -
 drivers/net/cnxk/cn10k_rx.c     |   5 +-
 drivers/net/cnxk/cn10k_rx.h     | 124 ++++++++++++++++++++++++++++----
 drivers/net/cnxk/cn10k_rx_vec.c |   3 -
 drivers/net/cnxk/cn9k_ethdev.c  |   1 -
 drivers/net/cnxk/cn9k_rx.c      |   5 +-
 drivers/net/cnxk/cn9k_rx.h      | 124 ++++++++++++++++++++++++++++----
 drivers/net/cnxk/cn9k_rx_vec.c  |   3 -
 drivers/net/cnxk/cnxk_ethdev.h  |  19 ++---
 9 files changed, 232 insertions(+), 53 deletions(-)
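
The vector-mask-to-scalar-mask step in the new TSTAMP block reduces four
per-packet ptype lanes to a 4-bit scalar. A minimal AArch64 NEON sketch of
that reduction (the function name is made up for illustration):

#include <arm_neon.h>
#include <stdint.h>

/* Reduce four 32-bit ptype lanes to a 4-bit mask of PTP packets;
 * mirrors the vceqq/vandq/vaddvq sequence used in the patch.
 */
static inline uint8_t
ptp_lane_mask_sketch(uint32x4_t ptypes, uint32_t timesync_ptype)
{
	const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
	/* Lanes equal to the timesync ptype become all-ones. */
	uint32x4_t m = vceqq_u32(ptypes, vdupq_n_u32(timesync_ptype));
	/* Keep one unique bit per matching lane, then add across lanes. */
	return vaddvq_u32(vandq_u32(m, and_mask)) & 0xF;
}

Bit i of the result selects ol_flags and ts[i] for packet i, and
31 - __builtin_clz(res) picks the highest set bit, i.e. the newest matching
packet, whose timestamp is cached in rxq->tstamp.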

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index b079edbd35..7caec6cf14 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -301,7 +301,6 @@ nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
 	if (nix_recalc_mtu(eth_dev))
 		plt_err("Failed to set MTU size for ptp");
 
-	dev->scalar_ena = true;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	/* Setting up the function pointers as per new offload flags */
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index 3a9fd71309..69e767ac3d 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -75,10 +75,7 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 		dev->rx_pkt_burst_no_offload =
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0];
 
-	/* For PTP enabled, scalar rx function should be chosen as most of the
-	 * PTP apps are implemented to rx burst 1 pkt.
-	 */
-	if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (dev->scalar_ena) {
 		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 5926ff7f46..d9572b19e7 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -109,7 +109,7 @@ nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
 
 static __rte_always_inline void
 nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
-		    uint64_t rearm)
+		    uint64_t rearm, const uint16_t flags)
 {
 	const rte_iova_t *iova_list;
 	struct rte_mbuf *head;
@@ -125,8 +125,10 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		return;
 	}
 
-	mbuf->pkt_len = rx->pkt_lenm1 + 1;
-	mbuf->data_len = sg & 0xFFFF;
+	mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
+					       CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
+	mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
+					  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
 	mbuf->nb_segs = nb_segs;
 	sg = sg >> 16;
 
@@ -207,7 +209,7 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 	*(uint64_t *)(&mbuf->rearm_data) = val;
 
 	if (flag & NIX_RX_MULTI_SEG_F)
-		nix_cqe_xtract_mseg(rx, mbuf, val);
+		nix_cqe_xtract_mseg(rx, mbuf, val, flag);
 	else
 		mbuf->next = NULL;
 }
@@ -272,8 +274,9 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 				      flags);
 		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
 					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
-					(uint64_t *)((uint8_t *)mbuf + data_off)
-					);
+					(flags & NIX_RX_MULTI_SEG_F),
+					(uint64_t *)((uint8_t *)mbuf
+								+ data_off));
 		rx_pkts[packets++] = mbuf;
 		roc_prefetch_store_keep(mbuf);
 		head++;
@@ -469,6 +472,99 @@ cn10k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
 				mbuf3);
 		}
 
+		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
+			const uint16x8_t len_off = {
+				0,			     /* ptype   0:15 */
+				0,			     /* ptype  16:32 */
+				CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen  0:15*/
+				0,			     /* pktlen 16:32 */
+				CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen 0:15 */
+				0,
+				0,
+				0};
+			const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
+						  RTE_PTYPE_L2_ETHER_TIMESYNC,
+						  RTE_PTYPE_L2_ETHER_TIMESYNC,
+						  RTE_PTYPE_L2_ETHER_TIMESYNC};
+			const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
+						PKT_RX_IEEE1588_TMST |
+						rxq->tstamp->rx_tstamp_dynflag;
+			const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
+			uint64x2_t ts01, ts23, mask;
+			uint64_t ts[4];
+			uint8_t res;
+
+			/* Subtract timesync length from total pkt length. */
+			f0 = vsubq_u16(f0, len_off);
+			f1 = vsubq_u16(f1, len_off);
+			f2 = vsubq_u16(f2, len_off);
+			f3 = vsubq_u16(f3, len_off);
+
+			/* Get the address of actual timestamp. */
+			ts01 = vaddq_u64(mbuf01, data_off);
+			ts23 = vaddq_u64(mbuf23, data_off);
+			/* Load timestamp from address. */
+			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
+									  0),
+					      ts01, 0);
+			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
+									  1),
+					      ts01, 1);
+			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
+									  0),
+					      ts23, 0);
+			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
+									  1),
+					      ts23, 1);
+			/* Convert from be to cpu byteorder. */
+			ts01 = vrev64q_u8(ts01);
+			ts23 = vrev64q_u8(ts23);
+			/* Store timestamp into scalar for later use. */
+			ts[0] = vgetq_lane_u64(ts01, 0);
+			ts[1] = vgetq_lane_u64(ts01, 1);
+			ts[2] = vgetq_lane_u64(ts23, 0);
+			ts[3] = vgetq_lane_u64(ts23, 1);
+
+			/* Store timestamp into dynfield. */
+			*cnxk_nix_timestamp_dynfield(mbuf0, rxq->tstamp) =
+				ts[0];
+			*cnxk_nix_timestamp_dynfield(mbuf1, rxq->tstamp) =
+				ts[1];
+			*cnxk_nix_timestamp_dynfield(mbuf2, rxq->tstamp) =
+				ts[2];
+			*cnxk_nix_timestamp_dynfield(mbuf3, rxq->tstamp) =
+				ts[3];
+
+			/* Generate ptype mask to filter L2 ether timesync */
+			mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
+			mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
+			mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
+			mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);
+
+			/* Match against L2 ether timesync. */
+			mask = vceqq_u32(mask, ptype);
+			/* Convert the vector mask to a scalar mask. */
+			res = vaddvq_u32(vandq_u32(mask, and_mask));
+			res &= 0xF;
+
+			if (res) {
+				/* Fill in the ol_flags for any packets that
+				 * matched.
+				 */
+				ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
+				ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
+				ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
+				ol_flags3 |= ((res & 0x8) ? ts_olf : 0);
+
+				/* Update Rxq timestamp with the latest
+				 * timestamp.
+				 */
+				rxq->tstamp->rx_ready = 1;
+				rxq->tstamp->rx_tstamp =
+					ts[31 - __builtin_clz(res)];
+			}
+		}
+
 		/* Form rearm_data with ol_flags */
 		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
 		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
@@ -496,17 +592,17 @@ cn10k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 * individual mbufs in scalar mode.
 			 */
 			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
-					    (cq0 + CQE_SZ(0) + 8), mbuf0,
-					    mbuf_initializer);
+						(cq0 + CQE_SZ(0) + 8), mbuf0,
+					    mbuf_initializer, flags);
 			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
-					    (cq0 + CQE_SZ(1) + 8), mbuf1,
-					    mbuf_initializer);
+						(cq0 + CQE_SZ(1) + 8), mbuf1,
+					    mbuf_initializer, flags);
 			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
-					    (cq0 + CQE_SZ(2) + 8), mbuf2,
-					    mbuf_initializer);
+						(cq0 + CQE_SZ(2) + 8), mbuf2,
+					    mbuf_initializer, flags);
 			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
-					    (cq0 + CQE_SZ(3) + 8), mbuf3,
-					    mbuf_initializer);
+						(cq0 + CQE_SZ(3) + 8), mbuf3,
+					    mbuf_initializer, flags);
 		} else {
 			/* Update that no more segments */
 			mbuf0->next = NULL;
diff --git a/drivers/net/cnxk/cn10k_rx_vec.c b/drivers/net/cnxk/cn10k_rx_vec.c
index 65ffa97841..93528a44f9 100644
--- a/drivers/net/cnxk/cn10k_rx_vec.c
+++ b/drivers/net/cnxk/cn10k_rx_vec.c
@@ -11,9 +11,6 @@
 					       struct rte_mbuf **rx_pkts,      \
 					       uint16_t pkts)                  \
 	{                                                                      \
-		/* TSTMP is not supported by vector */                         \
-		if ((flags) & NIX_RX_OFFLOAD_TSTAMP_F)                         \
-			return 0;                                              \
 		return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,     \
 						  (flags));		       \
 	}
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 107a540915..cb302b75d8 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -309,7 +309,6 @@ nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
 	if (nix_recalc_mtu(eth_dev))
 		plt_err("Failed to set MTU size for ptp");
 
-	dev->scalar_ena = true;
 	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
 
 	/* Setting up the function pointers as per new offload flags */
diff --git a/drivers/net/cnxk/cn9k_rx.c b/drivers/net/cnxk/cn9k_rx.c
index d293d4eac3..7d9f1bd61f 100644
--- a/drivers/net/cnxk/cn9k_rx.c
+++ b/drivers/net/cnxk/cn9k_rx.c
@@ -75,10 +75,7 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 		dev->rx_pkt_burst_no_offload =
 			nix_eth_rx_burst_mseg[0][0][0][0][0][0];
 
-	/* For PTP enabled, scalar rx function should be chosen as most of the
-	 * PTP apps are implemented to rx burst 1 pkt.
-	 */
-	if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+	if (dev->scalar_ena) {
 		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
 			return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
 		return pick_rx_func(eth_dev, nix_eth_rx_burst);
diff --git a/drivers/net/cnxk/cn9k_rx.h b/drivers/net/cnxk/cn9k_rx.h
index 5ae9e8195c..beb52f39d5 100644
--- a/drivers/net/cnxk/cn9k_rx.h
+++ b/drivers/net/cnxk/cn9k_rx.h
@@ -110,7 +110,7 @@ nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
 
 static __rte_always_inline void
 nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
-		    uint64_t rearm)
+		    uint64_t rearm, const uint16_t flags)
 {
 	const rte_iova_t *iova_list;
 	struct rte_mbuf *head;
@@ -126,8 +126,10 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		return;
 	}
 
-	mbuf->pkt_len = rx->pkt_lenm1 + 1;
-	mbuf->data_len = sg & 0xFFFF;
+	mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
+					       CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
+	mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
+					  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
 	mbuf->nb_segs = nb_segs;
 	sg = sg >> 16;
 
@@ -210,7 +212,7 @@ cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 	*(uint64_t *)(&mbuf->rearm_data) = val;
 
 	if (flag & NIX_RX_MULTI_SEG_F)
-		nix_cqe_xtract_mseg(rx, mbuf, val);
+		nix_cqe_xtract_mseg(rx, mbuf, val, flag);
 	else
 		mbuf->next = NULL;
 }
@@ -275,8 +277,9 @@ cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 				     flags);
 		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
 					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
-					(uint64_t *)((uint8_t *)mbuf + data_off)
-					);
+					(flags & NIX_RX_MULTI_SEG_F),
+					(uint64_t *)((uint8_t *)mbuf
+								+ data_off));
 		rx_pkts[packets++] = mbuf;
 		roc_prefetch_store_keep(mbuf);
 		head++;
@@ -472,6 +475,99 @@ cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
 				mbuf3);
 		}
 
+		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
+			const uint16x8_t len_off = {
+				0,			     /* ptype   0:15 */
+				0,			     /* ptype  16:32 */
+				CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen  0:15*/
+				0,			     /* pktlen 16:32 */
+				CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen 0:15 */
+				0,
+				0,
+				0};
+			const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
+						  RTE_PTYPE_L2_ETHER_TIMESYNC,
+						  RTE_PTYPE_L2_ETHER_TIMESYNC,
+						  RTE_PTYPE_L2_ETHER_TIMESYNC};
+			const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
+						PKT_RX_IEEE1588_TMST |
+						rxq->tstamp->rx_tstamp_dynflag;
+			const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
+			uint64x2_t ts01, ts23, mask;
+			uint64_t ts[4];
+			uint8_t res;
+
+			/* Subtract timesync length from total pkt length. */
+			f0 = vsubq_u16(f0, len_off);
+			f1 = vsubq_u16(f1, len_off);
+			f2 = vsubq_u16(f2, len_off);
+			f3 = vsubq_u16(f3, len_off);
+
+			/* Get the address of actual timestamp. */
+			ts01 = vaddq_u64(mbuf01, data_off);
+			ts23 = vaddq_u64(mbuf23, data_off);
+			/* Load timestamp from address. */
+			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
+									  0),
+					      ts01, 0);
+			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
+									  1),
+					      ts01, 1);
+			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
+									  0),
+					      ts23, 0);
+			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
+									  1),
+					      ts23, 1);
+			/* Convert from be to cpu byteorder. */
+			ts01 = vrev64q_u8(ts01);
+			ts23 = vrev64q_u8(ts23);
+			/* Store timestamp into scalar for later use. */
+			ts[0] = vgetq_lane_u64(ts01, 0);
+			ts[1] = vgetq_lane_u64(ts01, 1);
+			ts[2] = vgetq_lane_u64(ts23, 0);
+			ts[3] = vgetq_lane_u64(ts23, 1);
+
+			/* Store timestamp into dynfield. */
+			*cnxk_nix_timestamp_dynfield(mbuf0, rxq->tstamp) =
+				ts[0];
+			*cnxk_nix_timestamp_dynfield(mbuf1, rxq->tstamp) =
+				ts[1];
+			*cnxk_nix_timestamp_dynfield(mbuf2, rxq->tstamp) =
+				ts[2];
+			*cnxk_nix_timestamp_dynfield(mbuf3, rxq->tstamp) =
+				ts[3];
+
+			/* Generate ptype mask to filter L2 ether timesync */
+			mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
+			mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
+			mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
+			mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);
+
+			/* Match against L2 ether timesync. */
+			mask = vceqq_u32(mask, ptype);
+			/* Convert the vector mask to a scalar mask. */
+			res = vaddvq_u32(vandq_u32(mask, and_mask));
+			res &= 0xF;
+
+			if (res) {
+				/* Fill in the ol_flags for any packets that
+				 * matched.
+				 */
+				ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
+				ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
+				ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
+				ol_flags3 |= ((res & 0x8) ? ts_olf : 0);
+
+				/* Update Rxq timestamp with the latest
+				 * timestamp.
+				 */
+				rxq->tstamp->rx_ready = 1;
+				rxq->tstamp->rx_tstamp =
+					ts[31 - __builtin_clz(res)];
+			}
+		}
+
 		/* Form rearm_data with ol_flags */
 		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
 		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
@@ -499,17 +595,17 @@ cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 * individual mbufs in scalar mode.
 			 */
 			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
-					    (cq0 + CQE_SZ(0) + 8), mbuf0,
-					    mbuf_initializer);
+						(cq0 + CQE_SZ(0) + 8), mbuf0,
+					    mbuf_initializer, flags);
 			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
-					    (cq0 + CQE_SZ(1) + 8), mbuf1,
-					    mbuf_initializer);
+						(cq0 + CQE_SZ(1) + 8), mbuf1,
+					    mbuf_initializer, flags);
 			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
-					    (cq0 + CQE_SZ(2) + 8), mbuf2,
-					    mbuf_initializer);
+						(cq0 + CQE_SZ(2) + 8), mbuf2,
+					    mbuf_initializer, flags);
 			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
-					    (cq0 + CQE_SZ(3) + 8), mbuf3,
-					    mbuf_initializer);
+						(cq0 + CQE_SZ(3) + 8), mbuf3,
+					    mbuf_initializer, flags);
 		} else {
 			/* Update that no more segments */
 			mbuf0->next = NULL;
diff --git a/drivers/net/cnxk/cn9k_rx_vec.c b/drivers/net/cnxk/cn9k_rx_vec.c
index e61c2225c6..ef5f771ef7 100644
--- a/drivers/net/cnxk/cn9k_rx_vec.c
+++ b/drivers/net/cnxk/cn9k_rx_vec.c
@@ -9,9 +9,6 @@
 	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name(       \
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
 	{                                                                      \
-		/* TSTMP is not supported by vector */                         \
-		if ((flags) & NIX_RX_OFFLOAD_TSTAMP_F)                         \
-			return 0;                                              \
 		return cn9k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,      \
 						 (flags));                     \
 	}
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 67b1f42531..4eead03905 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -136,13 +136,12 @@ struct cnxk_eth_qconf {
 };
 
 struct cnxk_timesync_info {
+	uint8_t rx_ready;
+	uint64_t rx_tstamp;
 	uint64_t rx_tstamp_dynflag;
+	int tstamp_dynfield_offset;
 	rte_iova_t tx_tstamp_iova;
 	uint64_t *tx_tstamp;
-	uint64_t rx_tstamp;
-	int tstamp_dynfield_offset;
-	uint8_t tx_ready;
-	uint8_t rx_ready;
 } __plt_cache_aligned;
 
 struct cnxk_eth_dev {
@@ -465,13 +464,15 @@ cnxk_nix_timestamp_dynfield(struct rte_mbuf *mbuf,
 
 static __rte_always_inline void
 cnxk_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
-			struct cnxk_timesync_info *tstamp, bool ts_enable,
+			struct cnxk_timesync_info *tstamp,
+			const uint8_t ts_enable, const uint8_t mseg_enable,
 			uint64_t *tstamp_ptr)
 {
-	if (ts_enable &&
-	    (mbuf->data_off ==
-	     RTE_PKTMBUF_HEADROOM + CNXK_NIX_TIMESYNC_RX_OFFSET)) {
-		mbuf->pkt_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
+	if (ts_enable) {
+		if (!mseg_enable) {
+			mbuf->pkt_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
+			mbuf->data_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
+		}
 
 		/* Reading the rx timestamp inserted by CGX, viz at
 		 * starting of the packet data.
-- 
2.17.1



Thread overview: 93+ messages
2021-05-24 12:22 [dpdk-dev] [PATCH v2 1/4] event/cnxk: add Rx adapter support pbhagavatula
2021-05-24 12:23 ` [dpdk-dev] [PATCH v2 2/4] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-05-24 12:23 ` [dpdk-dev] [PATCH v2 3/4] event/cnxk: add Tx adapter support pbhagavatula
2021-05-24 12:23 ` [dpdk-dev] [PATCH v2 4/4] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-19 11:01 ` [dpdk-dev] [PATCH v2 01/13] net/cnxk: add multi seg Rx vector routine pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 02/13] net/cnxk: enable ptp processing in vector Rx pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 03/13] net/cnxk: enable VLAN processing in vector Tx pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 04/13] net/cnxk: enable ptp " pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 05/13] net/cnxk: enable TSO " pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 06/13] net/cnxk: add multi seg Tx vector routine pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 07/13] event/cnxk: add Rx adapter support pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 08/13] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 09/13] event/cnxk: add Tx adapter support pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 10/13] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 11/13] event/cnxk: add Rx adapter vector support pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 12/13] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-06-19 11:01   ` [dpdk-dev] [PATCH v2 13/13] event/cnxk: add Tx " pbhagavatula
2021-06-20 20:28   ` [dpdk-dev] [PATCH v3 01/13] net/cnxk: add multi seg Rx vector routine pbhagavatula
2021-06-20 20:28     ` pbhagavatula [this message]
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 03/13] net/cnxk: enable VLAN processing in vector Tx pbhagavatula
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 04/13] net/cnxk: enable ptp " pbhagavatula
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 05/13] net/cnxk: enable TSO " pbhagavatula
2021-06-20 20:28     ` [dpdk-dev] [PATCH v3 06/13] net/cnxk: add multi seg Tx vector routine pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 07/13] event/cnxk: add Rx adapter support pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 08/13] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 09/13] event/cnxk: add Tx adapter support pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 10/13] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 11/13] event/cnxk: add Rx adapter vector support pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 12/13] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-06-20 20:29     ` [dpdk-dev] [PATCH v3 13/13] event/cnxk: add Tx " pbhagavatula
2021-06-27  6:57     ` [dpdk-dev] [PATCH v3 01/13] net/cnxk: add multi seg Rx vector routine Jerin Jacob
2021-06-28 19:41     ` [dpdk-dev] [PATCH v4 1/6] " pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 2/6] net/cnxk: enable ptp processing in vector Rx pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 3/6] net/cnxk: enable VLAN processing in vector Tx pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 4/6] net/cnxk: enable ptp " pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 5/6] net/cnxk: enable TSO " pbhagavatula
2021-06-28 19:41       ` [dpdk-dev] [PATCH v4 6/6] net/cnxk: add multi seg Tx vector routine pbhagavatula
2021-06-29  7:25         ` Nithin Dabilpuram
2021-06-29  7:44       ` [dpdk-dev] [PATCH v5 1/6] net/cnxk: add multi seg Rx " pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 2/6] net/cnxk: enable ptp processing in vector Rx pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 3/6] net/cnxk: enable VLAN processing in vector Tx pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 4/6] net/cnxk: enable ptp " pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 5/6] net/cnxk: enable TSO " pbhagavatula
2021-06-29  7:44         ` [dpdk-dev] [PATCH v5 6/6] net/cnxk: add multi seg Tx vector routine pbhagavatula
2021-06-29 16:20         ` [dpdk-dev] [PATCH v5 1/6] net/cnxk: add multi seg Rx " Jerin Jacob
2021-06-28 19:52     ` [dpdk-dev] [PATCH v4 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-06-28 19:52       ` [dpdk-dev] [PATCH v4 7/7] event/cnxk: add Tx " pbhagavatula
2021-06-29  8:01       ` [dpdk-dev] [PATCH v5 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-06-29  8:01         ` [dpdk-dev] [PATCH v5 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-02 21:14         ` [dpdk-dev] [PATCH v6 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-07-03 13:23             ` Nithin Dabilpuram
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-07-02 21:14           ` [dpdk-dev] [PATCH v6 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-03 22:00           ` [dpdk-dev] [PATCH v7 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-07-03 22:00             ` [dpdk-dev] [PATCH v7 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-11 23:29             ` [dpdk-dev] [PATCH v8 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-07-11 23:29               ` [dpdk-dev] [PATCH v8 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-13 13:36                 ` Jerin Jacob
2021-07-14  9:02               ` [dpdk-dev] [PATCH v9 1/7] event/cnxk: add Rx adapter support pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 2/7] event/cnxk: add Rx adapter fastpath ops pbhagavatula
2021-07-20 11:03                   ` David Marchand
2021-07-20 11:43                     ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-07-20 11:50                       ` David Marchand
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 3/7] event/cnxk: add Tx adapter support pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 4/7] event/cnxk: add Tx adapter fastpath ops pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 5/7] event/cnxk: add Rx adapter vector support pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 6/7] event/cnxk: add Rx event vector fastpath pbhagavatula
2021-07-14  9:02                 ` [dpdk-dev] [PATCH v9 7/7] event/cnxk: add Tx " pbhagavatula
2021-07-16 12:19                   ` Jerin Jacob
