From: Zhichao Zeng <zhichaox.zeng@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, yaqi.tang@intel.com,
Zhichao Zeng <zhichaox.zeng@intel.com>,
Bruce Richardson <bruce.richardson@intel.com>,
Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
Jingjing Wu <jingjing.wu@intel.com>,
Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH v4 3/3] net/iavf: support Rx timestamp offload on SSE
Date: Fri, 26 May 2023 10:43:28 +0800
Message-ID: <20230526024328.2575254-1-zhichaox.zeng@intel.com>
In-Reply-To: <20230427031330.1729969-1-zhichaox.zeng@intel.com>

This patch enables Rx timestamp offload on the SSE data path.

Enable timestamp offload with the '--enable-rx-timestamp' option;
note that enabling Rx timestamp offload reduces Rx performance.

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
---
v4: rework the AVX2 patch based on the offload path
---
v3: use the driver's dedicated logging macro
---
v2: fix a compile warning and a timestamp error
---
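Note for reviewers: as in the other vector paths of this series, only the
low 32 bits of the Rx timestamp are read from each descriptor; they are
extended to 64 bits with the PHC time cached in rxq->phc_time. The scalar
sketch below illustrates the idea only; the function name is invented here,
and the driver's actual helper (iavf_tstamp_convert_32b_64b) may differ in
detail.

#include <stdint.h>

/* Illustrative sketch, not driver code: extend a 32-bit hardware
 * timestamp to 64 bits using a cached 64-bit reference time.
 */
static uint64_t
extend_rx_timestamp_32b_64b(uint64_t cached_time, uint32_t hw_ts)
{
	uint32_t cached_low = (uint32_t)cached_time;
	uint64_t high = cached_time & 0xFFFFFFFF00000000ULL;

	/* A new value far below the cached low half means the 32-bit
	 * counter wrapped, so carry into the upper half.
	 */
	if (hw_ts < cached_low && (cached_low - hw_ts) > (UINT32_MAX / 2))
		high += (uint64_t)1 << 32;

	return high | hw_ts;
}
---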
drivers/net/iavf/iavf_rxtx_vec_sse.c | 161 ++++++++++++++++++++++++++-
1 file changed, 157 insertions(+), 4 deletions(-)
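
Also for reviewers: the wrap ("inflection point") detection in the SSE loop
follows the same idea as the other vector paths in this series: each packet's
low 32 timestamp bits are compared against the previous packet's, and any
lane where the current value is the smaller one marks a wrap. The
self-contained sketch below shows only that comparison (the function name is
invented for illustration) and leaves out the binary search the patch uses
to locate the exact lane.

#include <stdbool.h>
#include <smmintrin.h>	/* SSE4.1: _mm_max_epu32(), _mm_testz_si128() */

/* Illustrative sketch, not driver code: ts_low holds the four current
 * 32-bit timestamps, ts_prev the same values shifted by one packet
 * (lane 0 = last timestamp of the previous batch). A lane where
 * max(ts_low, ts_prev) != ts_low means the counter wrapped there.
 */
static bool
batch_has_ts_wrap(__m128i ts_low, __m128i ts_prev)
{
	const __m128i ones = _mm_set1_epi32(-1); /* all bits set */
	__m128i max_ret = _mm_max_epu32(ts_low, ts_prev);
	__m128i wrapped = _mm_andnot_si128(_mm_cmpeq_epi32(max_ret, ts_low),
					   ones);

	/* _mm_testz_si128() returns 1 when no bit of 'wrapped' is set */
	return !_mm_testz_si128(wrapped, ones);
}
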
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 3f30be01aa..fe1242f2ac 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -392,6 +392,11 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
_mm_extract_epi32(fdir_id0_3, 3);
} /* if() on fdir_enabled */
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ flags = _mm_or_si128(flags, _mm_set1_epi32(iavf_timestamp_dynflag));
+#endif
+
/**
* At this point, we have the 4 sets of flags in the low 16-bits
* of each 32-bit value in flags.
@@ -723,7 +728,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
int pos;
uint64_t var;
struct iavf_adapter *adapter = rxq->vsi->adapter;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+#endif
const uint32_t *ptype_tbl = adapter->ptype_tbl;
__m128i crc_adjust = _mm_set_epi16
(0, 0, 0, /* ignore non-length fields */
@@ -793,6 +800,24 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
rte_cpu_to_le_32(1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ uint8_t inflection_point = 0;
+ bool is_tsinit = false;
+ __m128i hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) {
+ hw_low_last = _mm_setzero_si128();
+ is_tsinit = 1;
+ } else {
+ hw_low_last = _mm_set_epi32(0, 0, 0, (uint32_t)rxq->phc_time);
+ }
+ }
+
+#endif
+
/**
* Compile-time verify the shuffle mask
* NOTE: some field positions already verified above, but duplicated
@@ -895,11 +920,12 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
/**
- * needs to load 2nd 16B of each desc for RSS hash parsing,
+ * needs to load 2nd 16B of each desc,
* will cause performance drop to get into this context.
*/
- if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH ||
- rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ if (offloads & (RTE_ETH_RX_OFFLOAD_RSS_HASH |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
+ rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
/* load bottom half of every 32B desc */
descs_bh[3] = _mm_load_si128
((void *)(&rxdp[3].wb.status_error1));
@@ -964,7 +990,94 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
pkt_mb2 = _mm_or_si128(pkt_mb2, vlan_tci2);
pkt_mb1 = _mm_or_si128(pkt_mb1, vlan_tci1);
pkt_mb0 = _mm_or_si128(pkt_mb0, vlan_tci0);
- }
+ } /* if() on Vlan parsing */
+
+ if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ uint32_t mask = 0xFFFFFFFF;
+ __m128i ts;
+ __m128i ts_low = _mm_setzero_si128();
+ __m128i ts_low1;
+ __m128i max_ret;
+ __m128i cmp_ret;
+ uint8_t ret = 0;
+ uint8_t shift = 4;
+ __m128i ts_desp_mask = _mm_set_epi32(mask, 0, 0, 0);
+ __m128i cmp_mask = _mm_set1_epi32(mask);
+
+ ts = _mm_and_si128(descs_bh[0], ts_desp_mask);
+ ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 3 * 4));
+ ts = _mm_and_si128(descs_bh[1], ts_desp_mask);
+ ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 2 * 4));
+ ts = _mm_and_si128(descs_bh[2], ts_desp_mask);
+ ts_low = _mm_or_si128(ts_low, _mm_srli_si128(ts, 1 * 4));
+ ts = _mm_and_si128(descs_bh[3], ts_desp_mask);
+ ts_low = _mm_or_si128(ts_low, ts);
+
+ ts_low1 = _mm_slli_si128(ts_low, 4);
+ ts_low1 = _mm_and_si128(ts_low1, _mm_set_epi32(mask, mask, mask, 0));
+ ts_low1 = _mm_or_si128(ts_low1, hw_low_last);
+ hw_low_last = _mm_and_si128(ts_low, _mm_set_epi32(0, 0, 0, mask));
+
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+ iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 0);
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+ iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 1);
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+ iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 2);
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+ iavf_timestamp_dynfield_offset, uint32_t *) = _mm_extract_epi32(ts_low, 3);
+
+ if (unlikely(is_tsinit)) {
+ uint32_t in_timestamp;
+
+ if (iavf_get_phc_time(rxq))
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ in_timestamp = *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+ iavf_timestamp_dynfield_offset, uint32_t *);
+ rxq->phc_time = iavf_tstamp_convert_32b_64b(rxq->phc_time, in_timestamp);
+ }
+
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+ iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+ iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+ iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+ iavf_timestamp_dynfield_offset + 4, uint32_t *) = (uint32_t)(rxq->phc_time >> 32);
+
+ max_ret = _mm_max_epu32(ts_low, ts_low1);
+ cmp_ret = _mm_andnot_si128(_mm_cmpeq_epi32(max_ret, ts_low), cmp_mask);
+
+ if (_mm_testz_si128(cmp_ret, cmp_mask)) {
+ inflection_point = 0;
+ } else {
+ inflection_point = 1;
+ while (shift > 1) {
+ shift = shift >> 1;
+ __m128i mask_low;
+ __m128i mask_high;
+ switch (shift) {
+ case 2:
+ mask_low = _mm_set_epi32(0, 0, mask, mask);
+ mask_high = _mm_set_epi32(mask, mask, 0, 0);
+ break;
+ case 1:
+ mask_low = _mm_srli_si128(cmp_mask, 4);
+ mask_high = _mm_slli_si128(cmp_mask, 4);
+ break;
+ }
+ ret = _mm_testz_si128(cmp_ret, mask_low);
+ if (ret) {
+ ret = _mm_testz_si128(cmp_ret, mask_high);
+ inflection_point += ret ? 0 : shift;
+ cmp_mask = mask_high;
+ } else {
+ cmp_mask = mask_low;
+ }
+ }
+ }
+ } /* if() on Timestamp parsing */
flex_desc_to_olflags_v(rxq, descs, descs_bh, &rx_pkts[pos]);
#else
@@ -1011,10 +1124,50 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
/* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ inflection_point = (inflection_point <= var) ? inflection_point : 0;
+ switch (inflection_point) {
+ case 1:
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 0],
+ iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+ case 2:
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 1],
+ iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+ case 3:
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 2],
+ iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+ case 4:
+ *RTE_MBUF_DYNFIELD(rx_pkts[pos + 3],
+ iavf_timestamp_dynfield_offset + 4, uint32_t *) += 1;
+ rxq->phc_time += (uint64_t)1 << 32;
+ case 0:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid inflection point for rx timestamp");
+ break;
+ }
+
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ }
+#pragma GCC diagnostic pop
+#endif
+
if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
break;
}
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+#ifdef IAVF_RX_TS_OFFLOAD
+ if (nb_pkts_recd > 0 && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
+ rxq->phc_time = *RTE_MBUF_DYNFIELD(rx_pkts[nb_pkts_recd - 1],
+ iavf_timestamp_dynfield_offset, uint32_t *);
+#endif
+#endif
+
/* Update our internal tail pointer */
rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
--
2.34.1