DPDK patches and discussions
* [PATCH 3/3] net/iavf: support Rx timestamp offload on SSE
@ 2023-04-10  7:36 Zhichao Zeng
  2023-04-12  6:50 ` [PATCH v2 " Zhichao Zeng
  2023-04-12  8:46 ` Zhichao Zeng
  0 siblings, 2 replies; 9+ messages in thread
From: Zhichao Zeng @ 2023-04-10  7:36 UTC
  To: dev
  Cc: qi.z.zhang, yaqi.tang, Zhichao Zeng, Bruce Richardson,
	Konstantin Ananyev, Jingjing Wu, Beilei Xing

This patch enables Rx timestamp offload on the SSE data path.

Enable timestamp offload with the '--enable-rx-timestamp' option;
note that enabling Rx timestamp offload reduces performance.
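
For reference, an application consumes this offload through the mbuf
dynamic field/flag pair registered for timestamps. A minimal sketch,
assuming the standard names from rte_mbuf_dyn.h (the function name and
burst-loop details are illustrative, not part of this patch):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Print the Rx timestamp of each packet in one burst. */
static void
print_rx_timestamps(uint16_t port, uint16_t queue)
{
	struct rte_mbuf *bufs[32];
	int ts_off = rte_mbuf_dynfield_lookup(
			RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
	int ts_flag = rte_mbuf_dynflag_lookup(
			RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME, NULL);
	uint16_t i, nb;

	if (ts_off < 0 || ts_flag < 0)
		return;	/* RTE_ETH_RX_OFFLOAD_TIMESTAMP not enabled */

	nb = rte_eth_rx_burst(port, queue, bufs, 32);
	for (i = 0; i < nb; i++) {
		if (bufs[i]->ol_flags & (1ULL << ts_flag))
			printf("pkt %u: ts %" PRIu64 "\n", i,
				*RTE_MBUF_DYNFIELD(bufs[i], ts_off,
					rte_mbuf_timestamp_t *));
		rte_pktmbuf_free(bufs[i]);
	}
}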

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
---
 drivers/net/iavf/iavf_rxtx_vec_sse.c | 163 ++++++++++++++++++++++++++-
 1 file changed, 159 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 3f30be01aa..a627fb39a1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -392,6 +392,11 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
 			_mm_extract_epi32(fdir_id0_3, 3);
 	} /* if() on fdir_enabled */
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		flags = _mm_or_si128(flags, _mm_set1_epi32(iavf_timestamp_dynflag));
+#endif
+
 	/**
 	 * At this point, we have the 4 sets of flags in the low 16-bits
 	 * of each 32-bit value in flags.
@@ -793,6 +798,24 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	      rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	uint8_t inflection_point;
+	bool is_tsinit = false;
+	__m128i hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+		if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) {
+			hw_low_last = _mm_setzero_si128();
+			is_tsinit = 1;
+		} else {
+			hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+		}
+	}
+
+#endif
+
 	/**
 	 * Compile-time verify the shuffle mask
 	 * NOTE: some field positions already verified above, but duplicated
@@ -895,11 +918,12 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		/**
-		 * needs to load 2nd 16B of each desc for RSS hash parsing,
+		 * needs to load 2nd 16B of each desc,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
-			rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+		if (offloads & (RTE_ETH_RX_OFFLOAD_RSS_HASH |
+					RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
+				rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			descs_bh[3] = _mm_load_si128
 					((void *)(&rxdp[3].wb.status_error1));
@@ -964,7 +988,94 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 			pkt_mb2 = _mm_or_si128(pkt_mb2, vlan_tci2);
 			pkt_mb1 = _mm_or_si128(pkt_mb1, vlan_tci1);
 			pkt_mb0 = _mm_or_si128(pkt_mb0, vlan_tci0);
-		}
+		} /* if() on Vlan parsing */
+
+		if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t mask = 0xFFFFFFFF;
+			__m128i ts;
+			__m128i ts_low = _mm_setzero_si128();
+			__m128i ts_low1;
+			__m128i max_ret;
+			__m128i cmp_ret;
+			uint8_t ret = 0;
+			uint8_t shift = 4;
+			__m128i ts_desp_mask = _mm_set_epi32(mask, 0, 0, 0);
+			__m128i cmp_mask = _mm_set1_epi32(mask);
+
+			ts = _mm_and_si128(descs_bh[0], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 3 * 4));
+			ts = _mm_and_si128(descs_bh[1], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 2 * 4));
+			ts = _mm_and_si128(descs_bh[2], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 1 * 4));
+			ts = _mm_and_si128(descs_bh[3], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, ts);
+
+			ts_low1 = _mm_slli_si128(ts_low, 4);
+			ts_low1 = _mm_and_si128(ts_low, _mm_set_epi32(mask, mask, mask, 0));
+			ts_low1 = _mm_or_si128(ts_low1, hw_low_last);
+			hw_low_last = _mm_and_si128(ts_low, _mm_set_epi32(0, 0, 0, mask));
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 0);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 1);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 2);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 3);
+
+			if (unlikely(is_tsinit)) {
+				uint32_t in_timestamp;
+
+				if (iavf_get_phc_time(rxq))
+					PMD_DRV_LOG(ERR, "get physical time failed");
+				in_timestamp = *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+							iavf_timestamp_dynfield_offset, uint32_t *);
+				rxq->phc_time = iavf_tstamp_convert_32b_64b(rxq->phc_time, in_timestamp);
+			}
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+
+			max_ret = _mm_max_epu32(ts_low, ts_low1);
+			cmp_ret = _mm_andnot_si128(_mm_cmpeq_epi32(max_ret, ts_low1), cmp_mask);
+
+			if (_mm_testz_si128(cmp_ret, cmp_mask)) {
+				inflection_point = 0;
+			} else {
+				inflection_point = 1;
+				while (shift > 1) {
+					shift = shift >> 1;
+					__m128i mask_low;
+					__m128i mask_high;
+					switch (shift) {
+					case 2:
+						mask_low = _mm_set_epi32(0, 0, mask, mask);
+						mask_high = _mm_set_epi32(mask, mask, 0, 0);
+						break;
+					case 1:
+						mask_low = _mm_srli_si128(cmp_mask, 4);
+						mask_high = _mm_slli_si128(cmp_mask, 4);
+						break;
+					}
+					ret = _mm_testz_si128(cmp_ret, mask_low);
+					if (ret) {
+						ret = _mm_testz_si128(cmp_ret, mask_high);
+						inflection_point += ret ? 0 : shift;
+						cmp_mask = mask_high;
+					} else {
+						cmp_mask = mask_low;
+					}
+				}
+			}
+		} /* if() on Timestamp parsing */
 
 		flex_desc_to_olflags_v(rxq, descs, descs_bh, &rx_pkts[pos]);
 #else
@@ -1011,10 +1122,54 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		/* C.4 calc available number of desc */
 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
 		nb_pkts_recd += var;
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			inflection_point = (inflection_point <= var) ? inflection_point : 0;
+			switch (inflection_point) {
+			case 1:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+				break;
+			case 2:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+				break;
+			case 3:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+				break;
+			case 4:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+				rxq->phc_time += (uint64_t)1 << 32;
+				break;
+			case 0:
+				break;
+			default:
+				printf("invalid inflection point for rx timestamp\n");
+				break;
+			}
+
+			rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		}
+#pragma GCC diagnostic pop
+#endif
+
 		if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
 			break;
 	}
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+	if (nb_pkts_recd > 0 && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+		rxq->phc_time = *RTE_MBUF_DYNFIELD(rx_pkts[nb_pkts_recd - 1],
+						iavf_timestamp_dynfield_offset, uint32_t *);
+#endif
+#endif
+
 	/* Update our internal tail pointer */
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
-- 
2.25.1
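
The vector code above detects a 32-bit timestamp rollover inside the
4-descriptor burst with _mm_max_epu32/_mm_cmpeq_epi32 and then narrows
down its position with a two-step _mm_testz_si128 binary search over
the compare mask. A scalar sketch of the same idea (names are
illustrative; prev_low is the low timestamp word carried over from the
previous burst):

/* Return 0 if no rollover, else the 1-based index of the first packet
 * whose 32-bit timestamp wrapped relative to the value before it. */
static uint8_t
find_inflection_point(uint32_t prev_low, const uint32_t ts[4])
{
	uint32_t last = prev_low;
	uint8_t i;

	for (i = 0; i < 4; i++) {
		if (ts[i] < last)	/* counter went backwards: wrap */
			return i + 1;
		last = ts[i];
	}
	return 0;
}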



* [PATCH v2 3/3] net/iavf: support Rx timestamp offload on SSE
  2023-04-10  7:36 [PATCH 3/3] net/iavf: support Rx timestamp offload on SSE Zhichao Zeng
@ 2023-04-12  6:50 ` Zhichao Zeng
  2023-04-26  7:46   ` Tang, Yaqi
  2023-04-12  8:46 ` Zhichao Zeng
  1 sibling, 1 reply; 9+ messages in thread
From: Zhichao Zeng @ 2023-04-12  6:50 UTC
  To: dev
  Cc: qi.z.zhang, yaqi.tang, yingyax.han, Zhichao Zeng,
	Bruce Richardson, Konstantin Ananyev, Jingjing Wu, Beilei Xing

This patch enables Rx timestamp offload on the SSE data path.

Enable timestamp offload with the '--enable-rx-timestamp' option;
note that enabling Rx timestamp offload reduces performance.

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

---
v2: fix compile warning and timestamp error
---
 drivers/net/iavf/iavf_rxtx_vec_sse.c | 159 ++++++++++++++++++++++++++-
 1 file changed, 155 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 3f30be01aa..54bb5033ee 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -392,6 +392,11 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
 			_mm_extract_epi32(fdir_id0_3, 3);
 	} /* if() on fdir_enabled */
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		flags = _mm_or_si128(flags, _mm_set1_epi32(iavf_timestamp_dynflag));
+#endif
+
 	/**
 	 * At this point, we have the 4 sets of flags in the low 16-bits
 	 * of each 32-bit value in flags.
@@ -793,6 +798,24 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	      rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	uint8_t inflection_point = 0;
+	bool is_tsinit = false;
+	__m128i hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+		if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) {
+			hw_low_last = _mm_setzero_si128();
+			is_tsinit = 1;
+		} else {
+			hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+		}
+	}
+
+#endif
+
 	/**
 	 * Compile-time verify the shuffle mask
 	 * NOTE: some field positions already verified above, but duplicated
@@ -895,11 +918,12 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		/**
-		 * needs to load 2nd 16B of each desc for RSS hash parsing,
+		 * needs to load 2nd 16B of each desc,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
-			rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+		if (offloads & (RTE_ETH_RX_OFFLOAD_RSS_HASH |
+					RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
+				rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			descs_bh[3] = _mm_load_si128
 					((void *)(&rxdp[3].wb.status_error1));
@@ -964,7 +988,94 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 			pkt_mb2 = _mm_or_si128(pkt_mb2, vlan_tci2);
 			pkt_mb1 = _mm_or_si128(pkt_mb1, vlan_tci1);
 			pkt_mb0 = _mm_or_si128(pkt_mb0, vlan_tci0);
-		}
+		} /* if() on Vlan parsing */
+
+		if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t mask = 0xFFFFFFFF;
+			__m128i ts;
+			__m128i ts_low = _mm_setzero_si128();
+			__m128i ts_low1;
+			__m128i max_ret;
+			__m128i cmp_ret;
+			uint8_t ret = 0;
+			uint8_t shift = 4;
+			__m128i ts_desp_mask = _mm_set_epi32(mask, 0, 0, 0);
+			__m128i cmp_mask = _mm_set1_epi32(mask);
+
+			ts = _mm_and_si128(descs_bh[0], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 3 * 4));
+			ts = _mm_and_si128(descs_bh[1], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 2 * 4));
+			ts = _mm_and_si128(descs_bh[2], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 1 * 4));
+			ts = _mm_and_si128(descs_bh[3], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, ts);
+
+			ts_low1 = _mm_slli_si128(ts_low, 4);
+			ts_low1 = _mm_and_si128(ts_low, _mm_set_epi32(mask, mask, mask, 0));
+			ts_low1 = _mm_or_si128(ts_low1, hw_low_last);
+			hw_low_last = _mm_and_si128(ts_low, _mm_set_epi32(0, 0, 0, mask));
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 0);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 1);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 2);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 3);
+
+			if (unlikely(is_tsinit)) {
+				uint32_t in_timestamp;
+
+				if (iavf_get_phc_time(rxq))
+					PMD_DRV_LOG(ERR, "get physical time failed");
+				in_timestamp = *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+							iavf_timestamp_dynfield_offset, uint32_t *);
+				rxq->phc_time = iavf_tstamp_convert_32b_64b(rxq->phc_time, in_timestamp);
+			}
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+
+			max_ret = _mm_max_epu32(ts_low, ts_low1);
+			cmp_ret = _mm_andnot_si128(_mm_cmpeq_epi32(max_ret, ts_low), cmp_mask);
+
+			if (_mm_testz_si128(cmp_ret, cmp_mask)) {
+				inflection_point = 0;
+			} else {
+				inflection_point = 1;
+				while (shift > 1) {
+					shift = shift >> 1;
+					__m128i mask_low;
+					__m128i mask_high;
+					switch (shift) {
+					case 2:
+						mask_low = _mm_set_epi32(0, 0, mask, mask);
+						mask_high = _mm_set_epi32(mask, mask, 0, 0);
+						break;
+					case 1:
+						mask_low = _mm_srli_si128(cmp_mask, 4);
+						mask_high = _mm_slli_si128(cmp_mask, 4);
+						break;
+					}
+					ret = _mm_testz_si128(cmp_ret, mask_low);
+					if (ret) {
+						ret = _mm_testz_si128(cmp_ret, mask_high);
+						inflection_point += ret ? 0 : shift;
+						cmp_mask = mask_high;
+					} else {
+						cmp_mask = mask_low;
+					}
+				}
+			}
+		} /* if() on Timestamp parsing */
 
 		flex_desc_to_olflags_v(rxq, descs, descs_bh, &rx_pkts[pos]);
 #else
@@ -1011,10 +1122,50 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		/* C.4 calc available number of desc */
 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
 		nb_pkts_recd += var;
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			inflection_point = (inflection_point <= var) ? inflection_point : 0;
+			switch (inflection_point) {
+			case 1:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 2:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 3:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 4:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+				rxq->phc_time += (uint64_t)1 << 32;
+			case 0:
+				break;
+			default:
+				printf("invalid inflection point for rx timestamp\n");
+				break;
+			}
+
+			rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		}
+#pragma GCC diagnostic pop
+#endif
+
 		if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
 			break;
 	}
 
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+	if (nb_pkts_recd > 0 && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+		rxq->phc_time = *RTE_MBUF_DYNFIELD(rx_pkts[nb_pkts_recd - 1],
+						iavf_timestamp_dynfield_offset, uint32_t *);
+#endif
+#endif
+
 	/* Update our internal tail pointer */
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
-- 
2.25.1
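
Two details of the hunks above are worth noting. The staleness check is
in milliseconds: rte_get_timer_cycles() divided by (rte_get_timer_hz() /
1000) yields a millisecond tick, so the PHC value is re-read from the
hardware whenever more than 4 ms have passed since the last update. The
widening itself is done by iavf_tstamp_convert_32b_64b(); a scalar
sketch of this style of 32b-to-64b extension (an assumption about the
helper's behavior, not its verbatim source):

#include <stdint.h>

/* Widen a 32-bit HW timestamp against a recent 64-bit PHC reading by
 * treating the difference of the low words as a signed delta. */
static uint64_t
tstamp_widen_32b_64b(uint64_t phc_time, uint32_t in_ts)
{
	uint32_t phc_low = (uint32_t)phc_time;
	uint32_t delta = in_ts - phc_low;

	if (delta > UINT32_MAX / 2)	/* packet predates the reading */
		return phc_time - (phc_low - in_ts);
	return phc_time + delta;
}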



* [PATCH v2 3/3] net/iavf: support Rx timestamp offload on SSE
  2023-04-10  7:36 [PATCH 3/3] net/iavf: support Rx timestamp offload on SSE Zhichao Zeng
  2023-04-12  6:50 ` [PATCH v2 " Zhichao Zeng
@ 2023-04-12  8:46 ` Zhichao Zeng
  2023-04-26 15:31   ` David Marchand
  2023-04-27  3:13   ` [PATCH v3 " Zhichao Zeng
  1 sibling, 2 replies; 9+ messages in thread
From: Zhichao Zeng @ 2023-04-12  8:46 UTC
  To: dev
  Cc: qi.z.zhang, yaqi.tang, yingyax.han, Zhichao Zeng,
	Bruce Richardson, Konstantin Ananyev, Jingjing Wu, Beilei Xing

This patch enables Rx timestamp offload on the SSE data path.

Enable timestamp offload with the '--enable-rx-timestamp' option;
note that enabling Rx timestamp offload reduces performance.

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

---
v2: fix compile warning and timestamp error
---
 drivers/net/iavf/iavf_rxtx_vec_sse.c | 161 ++++++++++++++++++++++++++-
 1 file changed, 157 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 3f30be01aa..f01fda1ec8 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -392,6 +392,11 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
 			_mm_extract_epi32(fdir_id0_3, 3);
 	} /* if() on fdir_enabled */
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		flags = _mm_or_si128(flags, _mm_set1_epi32(iavf_timestamp_dynflag));
+#endif
+
 	/**
 	 * At this point, we have the 4 sets of flags in the low 16-bits
 	 * of each 32-bit value in flags.
@@ -723,7 +728,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	int pos;
 	uint64_t var;
 	struct iavf_adapter *adapter = rxq->vsi->adapter;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+#endif
 	const uint32_t *ptype_tbl = adapter->ptype_tbl;
 	__m128i crc_adjust = _mm_set_epi16
 				(0, 0, 0,       /* ignore non-length fields */
@@ -793,6 +800,24 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	      rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	uint8_t inflection_point = 0;
+	bool is_tsinit = false;
+	__m128i hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+		if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) {
+			hw_low_last = _mm_setzero_si128();
+			is_tsinit = 1;
+		} else {
+			hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+		}
+	}
+
+#endif
+
 	/**
 	 * Compile-time verify the shuffle mask
 	 * NOTE: some field positions already verified above, but duplicated
@@ -895,11 +920,12 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		/**
-		 * needs to load 2nd 16B of each desc for RSS hash parsing,
+		 * needs to load 2nd 16B of each desc,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
-			rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+		if (offloads & (RTE_ETH_RX_OFFLOAD_RSS_HASH |
+					RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
+				rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			descs_bh[3] = _mm_load_si128
 					((void *)(&rxdp[3].wb.status_error1));
@@ -964,7 +990,94 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 			pkt_mb2 = _mm_or_si128(pkt_mb2, vlan_tci2);
 			pkt_mb1 = _mm_or_si128(pkt_mb1, vlan_tci1);
 			pkt_mb0 = _mm_or_si128(pkt_mb0, vlan_tci0);
-		}
+		} /* if() on Vlan parsing */
+
+		if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t mask = 0xFFFFFFFF;
+			__m128i ts;
+			__m128i ts_low = _mm_setzero_si128();
+			__m128i ts_low1;
+			__m128i max_ret;
+			__m128i cmp_ret;
+			uint8_t ret = 0;
+			uint8_t shift = 4;
+			__m128i ts_desp_mask = _mm_set_epi32(mask, 0, 0, 0);
+			__m128i cmp_mask = _mm_set1_epi32(mask);
+
+			ts = _mm_and_si128(descs_bh[0], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 3 * 4));
+			ts = _mm_and_si128(descs_bh[1], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 2 * 4));
+			ts = _mm_and_si128(descs_bh[2], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 1 * 4));
+			ts = _mm_and_si128(descs_bh[3], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, ts);
+
+			ts_low1 = _mm_slli_si128(ts_low, 4);
+			ts_low1 = _mm_and_si128(ts_low, _mm_set_epi32(mask, mask, mask, 0));
+			ts_low1 = _mm_or_si128(ts_low1, hw_low_last);
+			hw_low_last = _mm_and_si128(ts_low, _mm_set_epi32(0, 0, 0, mask));
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 0);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 1);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 2);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 3);
+
+			if (unlikely(is_tsinit)) {
+				uint32_t in_timestamp;
+
+				if (iavf_get_phc_time(rxq))
+					PMD_DRV_LOG(ERR, "get physical time failed");
+				in_timestamp = *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+							iavf_timestamp_dynfield_offset, uint32_t *);
+				rxq->phc_time = iavf_tstamp_convert_32b_64b(rxq->phc_time, in_timestamp);
+			}
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+
+			max_ret = _mm_max_epu32(ts_low, ts_low1);
+			cmp_ret = _mm_andnot_si128(_mm_cmpeq_epi32(max_ret, ts_low), cmp_mask);
+
+			if (_mm_testz_si128(cmp_ret, cmp_mask)) {
+				inflection_point = 0;
+			} else {
+				inflection_point = 1;
+				while (shift > 1) {
+					shift = shift >> 1;
+					__m128i mask_low;
+					__m128i mask_high;
+					switch (shift) {
+					case 2:
+						mask_low = _mm_set_epi32(0, 0, mask, mask);
+						mask_high = _mm_set_epi32(mask, mask, 0, 0);
+						break;
+					case 1:
+						mask_low = _mm_srli_si128(cmp_mask, 4);
+						mask_high = _mm_slli_si128(cmp_mask, 4);
+						break;
+					}
+					ret = _mm_testz_si128(cmp_ret, mask_low);
+					if (ret) {
+						ret = _mm_testz_si128(cmp_ret, mask_high);
+						inflection_point += ret ? 0 : shift;
+						cmp_mask = mask_high;
+					} else {
+						cmp_mask = mask_low;
+					}
+				}
+			}
+		} /* if() on Timestamp parsing */
 
 		flex_desc_to_olflags_v(rxq, descs, descs_bh, &rx_pkts[pos]);
 #else
@@ -1011,10 +1124,50 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		/* C.4 calc available number of desc */
 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
 		nb_pkts_recd += var;
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			inflection_point = (inflection_point <= var) ? inflection_point : 0;
+			switch (inflection_point) {
+			case 1:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 2:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 3:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 4:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+				rxq->phc_time += (uint64_t)1 << 32;
+			case 0:
+				break;
+			default:
+				printf("invalid inflection point for rx timestamp\n");
+				break;
+			}
+
+			rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		}
+#pragma GCC diagnostic pop
+#endif
+
 		if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
 			break;
 	}
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+	if (nb_pkts_recd > 0 && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+		rxq->phc_time = *RTE_MBUF_DYNFIELD(rx_pkts[nb_pkts_recd - 1],
+						iavf_timestamp_dynfield_offset, uint32_t *);
+#endif
+#endif
+
 	/* Update our internal tail pointer */
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
-- 
2.25.1
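
Note that this revision drops the per-case break statements from the
inflection-point switch: with -Wimplicit-fallthrough suppressed, case N
deliberately falls through to the cases below it, so every packet from
the inflection point to the end of the burst has its upper 32 bits
bumped and rxq->phc_time is advanced once. In v1 each case ended in a
break and adjusted only a single packet, which is presumably the
"timestamp error" the v2 changelog refers to. A self-contained scalar
sketch of the fall-through's net effect (illustrative names):

#include <stdint.h>

/* ts_hi[i] is the upper-32-bit timestamp field of packet i in a
 * 4-packet burst; packets at and after the first rollover belong to
 * the next 32-bit epoch. */
static void
apply_rollover(uint32_t ts_hi[4], uint64_t *phc_time,
		uint8_t inflection_point)
{
	uint8_t i;

	if (inflection_point < 1 || inflection_point > 4)
		return;
	for (i = inflection_point - 1; i < 4; i++)
		ts_hi[i] += 1;
	*phc_time += (uint64_t)1 << 32;
}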



* RE: [PATCH v2 3/3] net/iavf: support Rx timestamp offload on SSE
  2023-04-12  6:50 ` [PATCH v2 " Zhichao Zeng
@ 2023-04-26  7:46   ` Tang, Yaqi
  0 siblings, 0 replies; 9+ messages in thread
From: Tang, Yaqi @ 2023-04-26  7:46 UTC
  To: Zeng, ZhichaoX, dev
  Cc: Zhang, Qi Z, Han, YingyaX, Richardson, Bruce, Konstantin Ananyev,
	Wu, Jingjing, Xing, Beilei


> -----Original Message-----
> From: Zeng, ZhichaoX <zhichaox.zeng@intel.com>
> Sent: Wednesday, April 12, 2023 2:50 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Tang, Yaqi <yaqi.tang@intel.com>;
> Han, YingyaX <yingyax.han@intel.com>; Zeng, ZhichaoX
> <zhichaox.zeng@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Konstantin Ananyev
> <konstantin.v.ananyev@yandex.ru>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v2 3/3] net/iavf: support Rx timestamp offload on SSE
> 
> This patch enables Rx timestamp offload on the SSE data path.
> 
> Enable timestamp offload with the '--enable-rx-timestamp' option; note
> that enabling Rx timestamp offload reduces performance.
> 
> Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
> 
> ---
> v2: fix compile warning and timestamp error
> ---

Functional tests passed, covering the SSE, AVX2 and AVX512 paths.

Tested-by: Yaqi Tang <yaqi.tang@intel.com>


* Re: [PATCH v2 3/3] net/iavf: support Rx timestamp offload on SSE
  2023-04-12  8:46 ` Zhichao Zeng
@ 2023-04-26 15:31   ` David Marchand
  2023-04-27  1:38     ` Zeng, ZhichaoX
  2023-04-27  3:13   ` [PATCH v3 " Zhichao Zeng
  1 sibling, 1 reply; 9+ messages in thread
From: David Marchand @ 2023-04-26 15:31 UTC
  To: Zhichao Zeng
  Cc: dev, qi.z.zhang, yaqi.tang, yingyax.han, Bruce Richardson,
	Konstantin Ananyev, Jingjing Wu, Beilei Xing

On Wed, Apr 12, 2023 at 10:42 AM Zhichao Zeng <zhichaox.zeng@intel.com> wrote:
[snip]
> +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
> +#pragma GCC diagnostic push
> +#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
> +               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
> +                       inflection_point = (inflection_point <= var) ? inflection_point : 0;
> +                       switch (inflection_point) {
> +                       case 1:
> +                               *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
> +                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
> +                       case 2:
> +                               *RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
> +                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
> +                       case 3:
> +                               *RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
> +                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
> +                       case 4:
> +                               *RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
> +                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
> +                               rxq->phc_time += (uint64_t)1 << 32;
> +                       case 0:
> +                               break;
> +                       default:
> +                               printf("invalid inflection point for rx timestamp\n");

No printf.
For all 3 patches of this series, please replace it with the driver's
dedicated macro for debug logging.
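
For reference, the iavf driver logs through PMD_DRV_LOG(), which wraps
rte_log() with the driver's own log type, so messages honor the EAL
log-level filtering that a bare printf() bypasses. The fix that v3
applies below is simply:

	PMD_DRV_LOG(ERR, "invalid inflection point for rx timestamp");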


-- 
David Marchand



* RE: [PATCH v2 3/3] net/iavf: support Rx timestamp offload on SSE
  2023-04-26 15:31   ` David Marchand
@ 2023-04-27  1:38     ` Zeng, ZhichaoX
  0 siblings, 0 replies; 9+ messages in thread
From: Zeng, ZhichaoX @ 2023-04-27  1:38 UTC
  To: David Marchand
  Cc: dev, Zhang, Qi Z, Tang, Yaqi, Han, YingyaX, Richardson, Bruce,
	Konstantin Ananyev, Wu, Jingjing, Xing, Beilei

Hi David,

> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Wednesday, April 26, 2023 11:32 PM
> To: Zeng, ZhichaoX <zhichaox.zeng@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>; Tang, Yaqi
> <yaqi.tang@intel.com>; Han, YingyaX <yingyax.han@intel.com>; Richardson,
> Bruce <bruce.richardson@intel.com>; Konstantin Ananyev
> <konstantin.v.ananyev@yandex.ru>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Subject: Re: [PATCH v2 3/3] net/iavf: support Rx timestamp offload on SSE
> 
> On Wed, Apr 12, 2023 at 10:42 AM Zhichao Zeng <zhichaox.zeng@intel.com>
> wrote:
> [snip]
> > +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
> > +#pragma GCC diagnostic push
> > +#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
> > +               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
> > +                       inflection_point = (inflection_point <= var) ? inflection_point : 0;
> > +                       switch (inflection_point) {
> > +                       case 1:
> > +                               *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
> > +                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
> > +                       case 2:
> > +                               *RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
> > +                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
> > +                       case 3:
> > +                               *RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
> > +                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
> > +                       case 4:
> > +                               *RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
> > +                                       iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
> > +                               rxq->phc_time += (uint64_t)1 << 32;
> > +                       case 0:
> > +                               break;
> > +                       default:
> > +                               printf("invalid inflection point for rx timestamp\n");
> 
> No printf.
> For all 3 patches of this series, please replace with this driver dedicated
> macro for debug logging.

Thanks for your comments; I will replace them in the next version.
> 
> --
> David Marchand



* [PATCH v3 3/3] net/iavf: support Rx timestamp offload on SSE
  2023-04-12  8:46 ` Zhichao Zeng
  2023-04-26 15:31   ` David Marchand
@ 2023-04-27  3:13   ` Zhichao Zeng
  2023-04-28  5:40     ` Tang, Yaqi
  2023-05-26  2:43     ` [PATCH v4 " Zhichao Zeng
  1 sibling, 2 replies; 9+ messages in thread
From: Zhichao Zeng @ 2023-04-27  3:13 UTC
  To: dev
  Cc: qi.z.zhang, yaqi.tang, Zhichao Zeng, Bruce Richardson,
	Konstantin Ananyev, Jingjing Wu, Beilei Xing

This patch enables Rx timestamp offload on the SSE data path.

Enable timestamp offload with the '--enable-rx-timestamp' option;
note that enabling Rx timestamp offload reduces performance.

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

---
v3: logging with driver dedicated macro
---
v2: fix compile warning and timestamp error
---
 drivers/net/iavf/iavf_rxtx_vec_sse.c | 161 ++++++++++++++++++++++++++-
 1 file changed, 157 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 3f30be01aa..fe1242f2ac 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -392,6 +392,11 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
 			_mm_extract_epi32(fdir_id0_3, 3);
 	} /* if() on fdir_enabled */
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		flags = _mm_or_si128(flags, _mm_set1_epi32(iavf_timestamp_dynflag));
+#endif
+
 	/**
 	 * At this point, we have the 4 sets of flags in the low 16-bits
 	 * of each 32-bit value in flags.
@@ -723,7 +728,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	int pos;
 	uint64_t var;
 	struct iavf_adapter *adapter = rxq->vsi->adapter;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+#endif
 	const uint32_t *ptype_tbl = adapter->ptype_tbl;
 	__m128i crc_adjust = _mm_set_epi16
 				(0, 0, 0,       /* ignore non-length fields */
@@ -793,6 +800,24 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	      rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	uint8_t inflection_point = 0;
+	bool is_tsinit = false;
+	__m128i hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+		if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) {
+			hw_low_last = _mm_setzero_si128();
+			is_tsinit = 1;
+		} else {
+			hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+		}
+	}
+
+#endif
+
 	/**
 	 * Compile-time verify the shuffle mask
 	 * NOTE: some field positions already verified above, but duplicated
@@ -895,11 +920,12 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		/**
-		 * needs to load 2nd 16B of each desc for RSS hash parsing,
+		 * needs to load 2nd 16B of each desc,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
-			rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+		if (offloads & (RTE_ETH_RX_OFFLOAD_RSS_HASH |
+					RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
+				rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			descs_bh[3] = _mm_load_si128
 					((void *)(&rxdp[3].wb.status_error1));
@@ -964,7 +990,94 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 			pkt_mb2 = _mm_or_si128(pkt_mb2, vlan_tci2);
 			pkt_mb1 = _mm_or_si128(pkt_mb1, vlan_tci1);
 			pkt_mb0 = _mm_or_si128(pkt_mb0, vlan_tci0);
-		}
+		} /* if() on Vlan parsing */
+
+		if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t mask = 0xFFFFFFFF;
+			__m128i ts;
+			__m128i ts_low = _mm_setzero_si128();
+			__m128i ts_low1;
+			__m128i max_ret;
+			__m128i cmp_ret;
+			uint8_t ret = 0;
+			uint8_t shift = 4;
+			__m128i ts_desp_mask = _mm_set_epi32(mask, 0, 0, 0);
+			__m128i cmp_mask = _mm_set1_epi32(mask);
+
+			ts = _mm_and_si128(descs_bh[0], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 3 * 4));
+			ts = _mm_and_si128(descs_bh[1], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 2 * 4));
+			ts = _mm_and_si128(descs_bh[2], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 1 * 4));
+			ts = _mm_and_si128(descs_bh[3], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, ts);
+
+			ts_low1 = _mm_slli_si128(ts_low, 4);
+			ts_low1 = _mm_and_si128(ts_low, _mm_set_epi32(mask, mask, mask, 0));
+			ts_low1 = _mm_or_si128(ts_low1, hw_low_last);
+			hw_low_last = _mm_and_si128(ts_low, _mm_set_epi32(0, 0, 0, mask));
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 0);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 1);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 2);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 3);
+
+			if (unlikely(is_tsinit)) {
+				uint32_t in_timestamp;
+
+				if (iavf_get_phc_time(rxq))
+					PMD_DRV_LOG(ERR, "get physical time failed");
+				in_timestamp = *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+							iavf_timestamp_dynfield_offset, uint32_t *);
+				rxq->phc_time = iavf_tstamp_convert_32b_64b(rxq->phc_time, in_timestamp);
+			}
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+
+			max_ret = _mm_max_epu32(ts_low, ts_low1);
+			cmp_ret = _mm_andnot_si128(_mm_cmpeq_epi32(max_ret, ts_low), cmp_mask);
+
+			if (_mm_testz_si128(cmp_ret, cmp_mask)) {
+				inflection_point = 0;
+			} else {
+				inflection_point = 1;
+				while (shift > 1) {
+					shift = shift >> 1;
+					__m128i mask_low;
+					__m128i mask_high;
+					switch (shift) {
+					case 2:
+						mask_low = _mm_set_epi32(0, 0, mask, mask);
+						mask_high = _mm_set_epi32(mask, mask, 0, 0);
+						break;
+					case 1:
+						mask_low = _mm_srli_si128(cmp_mask, 4);
+						mask_high = _mm_slli_si128(cmp_mask, 4);
+						break;
+					}
+					ret = _mm_testz_si128(cmp_ret, mask_low);
+					if (ret) {
+						ret = _mm_testz_si128(cmp_ret, mask_high);
+						inflection_point += ret ? 0 : shift;
+						cmp_mask = mask_high;
+					} else {
+						cmp_mask = mask_low;
+					}
+				}
+			}
+		} /* if() on Timestamp parsing */
 
 		flex_desc_to_olflags_v(rxq, descs, descs_bh, &rx_pkts[pos]);
 #else
@@ -1011,10 +1124,50 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		/* C.4 calc available number of desc */
 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
 		nb_pkts_recd += var;
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			inflection_point = (inflection_point <= var) ? inflection_point : 0;
+			switch (inflection_point) {
+			case 1:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 2:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 3:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 4:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+				rxq->phc_time += (uint64_t)1 << 32;
+			case 0:
+				break;
+			default:
+				PMD_DRV_LOG(ERR, "invalid inflection point for rx timestamp");
+				break;
+			}
+
+			rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		}
+#pragma GCC diagnostic pop
+#endif
+
 		if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
 			break;
 	}
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+	if (nb_pkts_recd > 0 && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+		rxq->phc_time = *RTE_MBUF_DYNFIELD(rx_pkts[nb_pkts_recd - 1],
+						iavf_timestamp_dynfield_offset, uint32_t *);
+#endif
+#endif
+
 	/* Update our internal tail pointer */
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
-- 
2.25.1
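
In the timestamp hunk above, each bottom-half descriptor carries the
low 32 bits of the timestamp in its highest dword, so the and/shift/or
sequence simply gathers lane 3 of every descriptor into one vector with
packet i's value in lane i. An equivalent sketch using SSE4.1 extracts,
which this file already relies on (illustrative; the in-tree code keeps
everything in vector registers instead):

#include <smmintrin.h>
#include <stdint.h>

/* Pack the four low 32-bit timestamps, one per descriptor, into a
 * single __m128i: lane i = timestamp of packet i. */
static __m128i
gather_ts_low(const __m128i descs_bh[4])
{
	uint32_t t0 = _mm_extract_epi32(descs_bh[0], 3);
	uint32_t t1 = _mm_extract_epi32(descs_bh[1], 3);
	uint32_t t2 = _mm_extract_epi32(descs_bh[2], 3);
	uint32_t t3 = _mm_extract_epi32(descs_bh[3], 3);

	return _mm_set_epi32(t3, t2, t1, t0);
}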



* RE: [PATCH v3 3/3] net/iavf: support Rx timestamp offload on SSE
  2023-04-27  3:13   ` [PATCH v3 " Zhichao Zeng
@ 2023-04-28  5:40     ` Tang, Yaqi
  2023-05-26  2:43     ` [PATCH v4 " Zhichao Zeng
  1 sibling, 0 replies; 9+ messages in thread
From: Tang, Yaqi @ 2023-04-28  5:40 UTC
  To: Zeng, ZhichaoX, dev
  Cc: Zhang, Qi Z, Richardson, Bruce, Konstantin Ananyev, Wu, Jingjing,
	Xing, Beilei


> -----Original Message-----
> From: Zeng, ZhichaoX <zhichaox.zeng@intel.com>
> Sent: Thursday, April 27, 2023 11:14 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Tang, Yaqi <yaqi.tang@intel.com>;
> Zeng, ZhichaoX <zhichaox.zeng@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Konstantin Ananyev
> <konstantin.v.ananyev@yandex.ru>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v3 3/3] net/iavf: support Rx timestamp offload on SSE
> 
> This patch enables Rx timestamp offload on the SSE data path.
> 
> Enable timestamp offload with the '--enable-rx-timestamp' option; note
> that enabling Rx timestamp offload reduces performance.
> 
> Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
> 
> ---
> v3: logging with driver dedicated macro
> ---
> v2: fix compile warning and timestamp error
> ---

Functional tests passed, covering the SSE, AVX2 and AVX512 paths.

Tested-by: Yaqi Tang <yaqi.tang@intel.com>


* [PATCH v4 3/3] net/iavf: support Rx timestamp offload on SSE
  2023-04-27  3:13   ` [PATCH v3 " Zhichao Zeng
  2023-04-28  5:40     ` Tang, Yaqi
@ 2023-05-26  2:43     ` Zhichao Zeng
  1 sibling, 0 replies; 9+ messages in thread
From: Zhichao Zeng @ 2023-05-26  2:43 UTC
  To: dev
  Cc: qi.z.zhang, yaqi.tang, Zhichao Zeng, Bruce Richardson,
	Konstantin Ananyev, Jingjing Wu, Beilei Xing

This patch enables Rx timestamp offload on the SSE data path.

Enable timestamp offload with the '--enable-rx-timestamp' option;
note that enabling Rx timestamp offload reduces performance.

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

---
v4: rework avx2 patch based on offload path
---
v3: logging with driver dedicated macro
---
v2: fix compile warning and timestamp error
---
 drivers/net/iavf/iavf_rxtx_vec_sse.c | 161 ++++++++++++++++++++++++++-
 1 file changed, 157 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 3f30be01aa..fe1242f2ac 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -392,6 +392,11 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
 			_mm_extract_epi32(fdir_id0_3, 3);
 	} /* if() on fdir_enabled */
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		flags = _mm_or_si128(flags, _mm_set1_epi32(iavf_timestamp_dynflag));
+#endif
+
 	/**
 	 * At this point, we have the 4 sets of flags in the low 16-bits
 	 * of each 32-bit value in flags.
@@ -723,7 +728,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	int pos;
 	uint64_t var;
 	struct iavf_adapter *adapter = rxq->vsi->adapter;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+#endif
 	const uint32_t *ptype_tbl = adapter->ptype_tbl;
 	__m128i crc_adjust = _mm_set_epi16
 				(0, 0, 0,       /* ignore non-length fields */
@@ -793,6 +800,24 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	      rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	uint8_t inflection_point = 0;
+	bool is_tsinit = false;
+	__m128i hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+		if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) {
+			hw_low_last = _mm_setzero_si128();
+			is_tsinit = 1;
+		} else {
+			hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+		}
+	}
+
+#endif
+
 	/**
 	 * Compile-time verify the shuffle mask
 	 * NOTE: some field positions already verified above, but duplicated
@@ -895,11 +920,12 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		/**
-		 * needs to load 2nd 16B of each desc for RSS hash parsing,
+		 * needs to load 2nd 16B of each desc,
 		 * will cause performance drop to get into this context.
 		 */
-		if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
-			rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+		if (offloads & (RTE_ETH_RX_OFFLOAD_RSS_HASH |
+					RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
+				rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			descs_bh[3] = _mm_load_si128
 					((void *)(&rxdp[3].wb.status_error1));
@@ -964,7 +990,94 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 			pkt_mb2 = _mm_or_si128(pkt_mb2, vlan_tci2);
 			pkt_mb1 = _mm_or_si128(pkt_mb1, vlan_tci1);
 			pkt_mb0 = _mm_or_si128(pkt_mb0, vlan_tci0);
-		}
+		} /* if() on Vlan parsing */
+
+		if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t mask = 0xFFFFFFFF;
+			__m128i ts;
+			__m128i ts_low = _mm_setzero_si128();
+			__m128i ts_low1;
+			__m128i max_ret;
+			__m128i cmp_ret;
+			uint8_t ret = 0;
+			uint8_t shift = 4;
+			__m128i ts_desp_mask = _mm_set_epi32(mask, 0, 0, 0);
+			__m128i cmp_mask = _mm_set1_epi32(mask);
+
+			ts = _mm_and_si128(descs_bh[0], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 3 * 4));
+			ts = _mm_and_si128(descs_bh[1], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 2 * 4));
+			ts = _mm_and_si128(descs_bh[2], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 1 * 4));
+			ts = _mm_and_si128(descs_bh[3], ts_desp_mask);
+			ts_low = _mm_or_si128(ts_low, ts);
+
+			ts_low1 = _mm_slli_si128(ts_low, 4);
+			ts_low1 = _mm_and_si128(ts_low, _mm_set_epi32(mask, mask, mask, 0));
+			ts_low1 = _mm_or_si128(ts_low1, hw_low_last);
+			hw_low_last = _mm_and_si128(ts_low, _mm_set_epi32(0, 0, 0, mask));
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 0);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 1);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 2);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 3);
+
+			if (unlikely(is_tsinit)) {
+				uint32_t in_timestamp;
+
+				if (iavf_get_phc_time(rxq))
+					PMD_DRV_LOG(ERR, "get physical time failed");
+				in_timestamp = *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+							iavf_timestamp_dynfield_offset, uint32_t *);
+				rxq->phc_time = iavf_tstamp_convert_32b_64b(rxq->phc_time, in_timestamp);
+			}
+
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+			*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+				iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+
+			max_ret = _mm_max_epu32(ts_low, ts_low1);
+			cmp_ret = _mm_andnot_si128(_mm_cmpeq_epi32(max_ret, ts_low), cmp_mask);
+
+			if (_mm_testz_si128(cmp_ret, cmp_mask)) {
+				inflection_point = 0;
+			} else {
+				inflection_point = 1;
+				while (shift > 1) {
+					shift = shift >> 1;
+					__m128i mask_low;
+					__m128i mask_high;
+					switch (shift) {
+					case 2:
+						mask_low = _mm_set_epi32(0, 0, mask, mask);
+						mask_high = _mm_set_epi32(mask, mask, 0, 0);
+						break;
+					case 1:
+						mask_low = _mm_srli_si128(cmp_mask, 4);
+						mask_high = _mm_slli_si128(cmp_mask, 4);
+						break;
+					}
+					ret = _mm_testz_si128(cmp_ret, mask_low);
+					if (ret) {
+						ret = _mm_testz_si128(cmp_ret, mask_high);
+						inflection_point += ret ? 0 : shift;
+						cmp_mask = mask_high;
+					} else {
+						cmp_mask = mask_low;
+					}
+				}
+			}
+		} /* if() on Timestamp parsing */
 
 		flex_desc_to_olflags_v(rxq, descs, descs_bh, &rx_pkts[pos]);
 #else
@@ -1011,10 +1124,50 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		/* C.4 calc available number of desc */
 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
 		nb_pkts_recd += var;
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			inflection_point = (inflection_point <= var) ? inflection_point : 0;
+			switch (inflection_point) {
+			case 1:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 2:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 3:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+			case 4:
+				*RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+					iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+				rxq->phc_time += (uint64_t)1 << 32;
+			case 0:
+				break;
+			default:
+				PMD_DRV_LOG(ERR, "invalid inflection point for rx timestamp");
+				break;
+			}
+
+			rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		}
+#pragma GCC diagnostic pop
+#endif
+
 		if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
 			break;
 	}
 
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+	if (nb_pkts_recd > 0 && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+		rxq->phc_time = *RTE_MBUF_DYNFIELD(rx_pkts[nb_pkts_recd - 1],
+						iavf_timestamp_dynfield_offset, uint32_t *);
+#endif
+#endif
+
 	/* Update our internal tail pointer */
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
-- 
2.34.1


