From: Wenjing Qiao <wenjing.qiao@intel.com>
To: jingjing.wu@intel.com, beilei.xing@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com,
	Wenjing Qiao <wenjing.qiao@intel.com>,
	stable@dpdk.org
Subject: [PATCH v3 1/7] common/idpf: fix 64b timestamp roll over issue
Date: Mon, 24 Apr 2023 05:17:01 -0400	[thread overview]
Message-ID: <20230424091707.488045-2-wenjing.qiao@intel.com> (raw)
In-Reply-To: <20230424091707.488045-1-wenjing.qiao@intel.com>

Reading the MTS registers only when the first packet arrives lets the
32b timestamp roll over between reads. To calculate the 64b timestamp
correctly, register an alarm that saves the main time from the
registers every second.
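
For reference, the extension logic that idpf_tstamp_convert_32b_64b()
implements after this change reduces to the standalone sketch below
(illustrative only, not the exact driver code; time_hw is the 64b main
time saved by the alarm):

    #include <stdint.h>

    static inline uint64_t
    tstamp_ext_32b_64b(uint64_t time_hw, uint32_t in_timestamp)
    {
            /* Offset of the 32b descriptor timestamp from the low
             * 32 bits of the saved main time.
             */
            uint32_t delta = in_timestamp - (uint32_t)time_hw;

            /* More than half a wrap apart: the 32b value wrapped
             * relative to the saved main time, so correct downward.
             */
            if (delta > (UINT32_C(1) << 31))
                    return time_hw - ((uint32_t)time_hw - in_timestamp);

            return time_hw + delta;
    }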

Fixes: 8c6098afa075 ("common/idpf: add Rx/Tx data path")
Cc: stable@dpdk.org

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
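
Note: idpf_dev_read_time_hw() re-arms itself via rte_eal_alarm_set(),
so a driver only needs to prime it once at start and cancel it at
stop. A hypothetical sketch of that wiring (the real hook-up lands in
the net/idpf and net/cpfl patches later in this series):

    #include <rte_alarm.h>

    /* Hypothetical helpers; "adapter" is the idpf_adapter bound to
     * the port being started or stopped.
     */
    static void
    ts_alarm_start(struct idpf_adapter *adapter)
    {
            /* First call reads the main time and schedules itself
             * to run again every second.
             */
            idpf_dev_read_time_hw(adapter);
    }

    static void
    ts_alarm_stop(struct idpf_adapter *adapter)
    {
            /* Cancel any pending re-arm of the callback. */
            rte_eal_alarm_cancel(idpf_dev_read_time_hw, adapter);
    }
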
 drivers/common/idpf/idpf_common_rxtx.c | 108 ++++++++++++-------------
 drivers/common/idpf/idpf_common_rxtx.h |   3 +-
 drivers/common/idpf/version.map        |   1 +
 3 files changed, 55 insertions(+), 57 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..19bcb94077 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -4,6 +4,7 @@
 
 #include <rte_mbuf_dyn.h>
 #include <rte_errno.h>
+#include <rte_alarm.h>
 
 #include "idpf_common_rxtx.h"
 
@@ -442,56 +443,23 @@ idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
 	return 0;
 }
 
-#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
 /* Helper function to convert a 32b nanoseconds timestamp to 64b. */
 static inline uint64_t
-idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
-			    uint32_t in_timestamp)
+idpf_tstamp_convert_32b_64b(uint64_t time_hw, uint32_t in_timestamp)
 {
-#ifdef RTE_ARCH_X86_64
-	struct idpf_hw *hw = &ad->hw;
 	const uint64_t mask = 0xFFFFFFFF;
-	uint32_t hi, lo, lo2, delta;
+	const uint32_t half_overflow_duration = UINT32_C(1) << 31;
+	uint32_t delta;
 	uint64_t ns;
 
-	if (flag != 0) {
-		IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
-		IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
-			       PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
-		lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-		hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
-		/*
-		 * On typical system, the delta between lo and lo2 is ~1000ns,
-		 * so 10000 seems a large-enough but not overly-big guard band.
-		 */
-		if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
-			lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-		else
-			lo2 = lo;
-
-		if (lo2 < lo) {
-			lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-			hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
-		}
-
-		ad->time_hw = ((uint64_t)hi << 32) | lo;
-	}
-
-	delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
-	if (delta > (mask / 2)) {
-		delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
-		ns = ad->time_hw - delta;
+	delta = (in_timestamp - (uint32_t)(time_hw & mask));
+	if (delta > half_overflow_duration) {
+		delta = ((uint32_t)(time_hw & mask) - in_timestamp);
+		ns = time_hw - delta;
 	} else {
-		ns = ad->time_hw + delta;
+		ns = time_hw + delta;
 	}
-
 	return ns;
-#else /* !RTE_ARCH_X86_64 */
-	RTE_SET_USED(ad);
-	RTE_SET_USED(flag);
-	RTE_SET_USED(in_timestamp);
-	return 0;
-#endif /* RTE_ARCH_X86_64 */
 }
 
 #define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S				\
@@ -659,9 +627,6 @@ idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	rx_desc_ring = rxq->rx_ring;
 	ptype_tbl = rxq->adapter->ptype_tbl;
 
-	if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
-		rxq->hw_register_set = 1;
-
 	while (nb_rx < nb_pkts) {
 		rx_desc = &rx_desc_ring[rx_id];
 
@@ -720,10 +685,8 @@ idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		if (idpf_timestamp_dynflag > 0 &&
 		    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP)) {
 			/* timestamp */
-			ts_ns = idpf_tstamp_convert_32b_64b(ad,
-							    rxq->hw_register_set,
+			ts_ns = idpf_tstamp_convert_32b_64b(ad->time_hw,
 							    rte_le_to_cpu_32(rx_desc->ts_high));
-			rxq->hw_register_set = 0;
 			*RTE_MBUF_DYNFIELD(rxm,
 					   idpf_timestamp_dynfield_offset,
 					   rte_mbuf_timestamp_t *) = ts_ns;
@@ -1077,9 +1040,6 @@ idpf_dp_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	rx_ring = rxq->rx_ring;
 	ptype_tbl = rxq->adapter->ptype_tbl;
 
-	if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
-		rxq->hw_register_set = 1;
-
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
 		rx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0);
@@ -1142,10 +1102,8 @@ idpf_dp_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		if (idpf_timestamp_dynflag > 0 &&
 		    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {
 			/* timestamp */
-			ts_ns = idpf_tstamp_convert_32b_64b(ad,
-					    rxq->hw_register_set,
+			ts_ns = idpf_tstamp_convert_32b_64b(ad->time_hw,
 					    rte_le_to_cpu_32(rxd.flex_nic_wb.flex_ts.ts_high));
-			rxq->hw_register_set = 0;
 			*RTE_MBUF_DYNFIELD(rxm,
 					   idpf_timestamp_dynfield_offset,
 					   rte_mbuf_timestamp_t *) = ts_ns;
@@ -1272,10 +1230,8 @@ idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		if (idpf_timestamp_dynflag > 0 &&
 		    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {
 			/* timestamp */
-			ts_ns = idpf_tstamp_convert_32b_64b(ad,
-				rxq->hw_register_set,
+			ts_ns = idpf_tstamp_convert_32b_64b(ad->time_hw,
 				rte_le_to_cpu_32(rxd.flex_nic_wb.flex_ts.ts_high));
-			rxq->hw_register_set = 0;
 			*RTE_MBUF_DYNFIELD(rxm,
 					   idpf_timestamp_dynfield_offset,
 					   rte_mbuf_timestamp_t *) = ts_ns;
@@ -1621,3 +1577,43 @@ idpf_qc_splitq_rx_vec_setup(struct idpf_rx_queue *rxq)
 	rxq->bufq2->ops = &def_rx_ops_vec;
 	return idpf_rxq_vec_setup_default(rxq->bufq2);
 }
+
+#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND	10000
+void
+idpf_dev_read_time_hw(void *cb_arg)
+{
+#ifdef RTE_ARCH_X86_64
+	struct idpf_adapter *ad = (struct idpf_adapter *)cb_arg;
+	uint32_t hi, lo, lo2;
+	int rc = 0;
+	struct idpf_hw *hw = &ad->hw;
+
+	IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+	IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0,
+		       PF_GLTSYN_CMD_SYNC_EXEC_CMD_M | PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+	lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+	hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+	/*
+	 * On a typical system, the delta between lo and lo2 is ~1000 ns,
+	 * so 10000 seems a large-enough but not overly-big guard band.
+	 */
+	if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
+		lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+	else
+		lo2 = lo;
+
+	if (lo2 < lo) {
+		lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+		hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+	}
+
+	ad->time_hw = ((uint64_t)hi << 32) | lo;
+#else  /* !RTE_ARCH_X86_64 */
+	ad->time_hw = 0;
+#endif /* RTE_ARCH_X86_64 */
+
+	/* Re-arm the alarm so the main time is refreshed every second */
+	rc = rte_eal_alarm_set(1000 * 1000, &idpf_dev_read_time_hw, cb_arg);
+	if (rc)
+		DRV_LOG(ERR, "Failed to re-arm the read time alarm");
+}
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 11260d07f9..af1425eb3f 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -142,7 +142,6 @@ struct idpf_rx_queue {
 	struct idpf_rx_queue *bufq2;
 
 	uint64_t offloads;
-	uint32_t hw_register_set;
 };
 
 struct idpf_tx_entry {
@@ -300,4 +299,6 @@ __rte_internal
 uint16_t idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			  uint16_t nb_pkts);
 
+__rte_internal
+void idpf_dev_read_time_hw(void *cb_arg);
 #endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..c67c554911 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -14,6 +14,7 @@ INTERNAL {
 	idpf_dp_splitq_recv_pkts_avx512;
 	idpf_dp_splitq_xmit_pkts;
 	idpf_dp_splitq_xmit_pkts_avx512;
+	idpf_dev_read_time_hw;
 
 	idpf_qc_rx_thresh_check;
 	idpf_qc_rx_queue_release;
-- 
2.25.1



Thread overview: 35+ messages
2023-04-20  9:19 [PATCH 0/7] update idpf and cpfl timestamp Wenjing Qiao
2023-04-20  9:19 ` [PATCH 1/7] common/idpf: fix 64b timestamp roll over issue Wenjing Qiao
2023-04-21  7:15   ` [PATCH v2 0/7] update idpf and cpfl timestamp Wenjing Qiao
2023-04-21  7:15     ` [PATCH v2 1/7] common/idpf: fix 64b timestamp roll over issue Wenjing Qiao
2023-04-24  9:17       ` [PATCH v3 0/7] fix and enhance idpf and cpfl timestamp Wenjing Qiao
2023-04-24  9:17         ` Wenjing Qiao [this message]
2023-05-19  8:31           ` [PATCH v4 0/7] fix " Wenjing Qiao
2023-05-19  8:31             ` [PATCH v4 1/7] common/idpf: fix 64b timestamp roll over issue Wenjing Qiao
2023-05-24  8:36               ` Liu, Mingxia
2023-05-19  8:31             ` [PATCH v4 2/7] net/idpf: save main time by alarm Wenjing Qiao
2023-05-19  8:31             ` [PATCH v4 3/7] net/cpfl: " Wenjing Qiao
2023-05-19  8:31             ` [PATCH v4 4/7] common/idpf: enhance timestamp offload feature for ACC Wenjing Qiao
2023-05-19  8:31             ` [PATCH v4 5/7] common/idpf: add timestamp enable flag for rxq Wenjing Qiao
2023-05-19  8:31             ` [PATCH v4 6/7] net/cpfl: adjust timestamp mbuf register Wenjing Qiao
2023-05-19  8:31             ` [PATCH v4 7/7] net/idpf: " Wenjing Qiao
2023-04-24  9:17         ` [PATCH v3 2/7] net/idpf: save main time by alarm Wenjing Qiao
2023-04-24  9:17         ` [PATCH v3 3/7] net/cpfl: " Wenjing Qiao
2023-04-24  9:17         ` [PATCH v3 4/7] common/idpf: enhance timestamp offload feature for ACC Wenjing Qiao
2023-04-24  9:17         ` [PATCH v3 5/7] common/idpf: add timestamp enable flag for rxq Wenjing Qiao
2023-04-24  9:17         ` [PATCH v3 6/7] net/cpfl: register timestamp mbuf when starting dev Wenjing Qiao
2023-04-28  3:24           ` Zhang, Qi Z
2023-04-24  9:17         ` [PATCH v3 7/7] net/idpf: " Wenjing Qiao
2023-04-21  7:15     ` [PATCH v2 2/7] net/idpf: save main time by alarm Wenjing Qiao
2023-04-28  2:46       ` Zhang, Qi Z
2023-04-21  7:15     ` [PATCH v2 3/7] net/cpfl: " Wenjing Qiao
2023-04-21  7:16     ` [PATCH v2 4/7] common/idpf: support timestamp offload feature for ACC Wenjing Qiao
2023-04-21  7:16     ` [PATCH v2 5/7] common/idpf: add timestamp enable flag for rxq Wenjing Qiao
2023-04-21  7:16     ` [PATCH v2 6/7] net/cpfl: register timestamp mbuf when starting dev Wenjing Qiao
2023-04-21  7:16     ` [PATCH v2 7/7] net/idpf: " Wenjing Qiao
2023-04-20  9:19 ` [PATCH 2/7] net/idpf: save master time by alarm Wenjing Qiao
2023-04-20  9:19 ` [PATCH 3/7] net/cpfl: " Wenjing Qiao
2023-04-20  9:19 ` [PATCH 4/7] common/idpf: support timestamp offload feature for ACC Wenjing Qiao
2023-04-20  9:19 ` [PATCH 5/7] common/idpf: add timestamp enable flag for rxq Wenjing Qiao
2023-04-20  9:19 ` [PATCH 6/7] net/cpfl: register timestamp mbuf when starting dev Wenjing Qiao
2023-04-20  9:19 ` [PATCH 7/7] net/idpf: " Wenjing Qiao
