From: Wenjing Qiao <wenjing.qiao@intel.com>
To: jingjing.wu@intel.com, beilei.xing@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com,
Wenjing Qiao <wenjing.qiao@intel.com>,
stable@dpdk.org
Subject: [PATCH v4 1/7] common/idpf: fix 64b timestamp roll over issue
Date: Fri, 19 May 2023 04:31:04 -0400
Message-ID: <20230519083110.809913-2-wenjing.qiao@intel.com>
In-Reply-To: <20230519083110.809913-1-wenjing.qiao@intel.com>
Reading the MTS registers only at the first packet can cause the
timestamp to roll over undetected. To calculate the 64-bit timestamp
correctly, add an alarm that saves the main time from the registers
every second.
Fixes: 8c6098afa075 ("common/idpf: add Rx/Tx data path")
Cc: stable@dpdk.org
Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
drivers/common/idpf/idpf_common_rxtx.c | 126 ++++++++++++++-----------
drivers/common/idpf/idpf_common_rxtx.h | 6 +-
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+), 57 deletions(-)
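Note on the approach: DPDK EAL alarms are one-shot, so the callback added
below (idpf_dev_read_time_hw) re-arms itself after every run to keep
sampling the main time once per second. A minimal standalone sketch of that
re-arming pattern, using illustrative names (sample_main_time,
cached_main_time) rather than the driver's own, and assuming only the
public rte_eal_alarm_set()/rte_eal_alarm_cancel() API:

    #include <stdint.h>
    #include <rte_alarm.h>
    #include <rte_log.h>

    /* Illustrative cache of the 64-bit main time; the driver keeps the
     * real value in struct idpf_adapter (ad->time_hw). */
    static uint64_t cached_main_time;

    static void
    sample_main_time(void *arg)
    {
        uint64_t *cache = arg;

        /* Stand-in for the GLTSYN shadow-register read sequence: here the
         * cache simply advances by one second per invocation. */
        *cache += 1000 * 1000 * 1000ULL;

        /* rte_eal_alarm_set() fires once, so schedule the next sample. */
        if (rte_eal_alarm_set(1000 * 1000 /* us */, sample_main_time, arg) != 0)
            RTE_LOG(ERR, EAL, "failed to re-arm main time alarm\n");
    }

    /* Arm the first sample, e.g. from dev_start(); stop later with
     * rte_eal_alarm_cancel(sample_main_time, &cached_main_time). */
    static int
    start_main_time_sampling(void)
    {
        return rte_eal_alarm_set(1000 * 1000, sample_main_time,
                                 &cached_main_time);
    }

Sampling once per second keeps the cached value well inside the ~4.29 s
wrap period of the 32-bit nanosecond counter, so the conversion helper can
always resolve the correct 64-bit value without touching registers in the
Rx path.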
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..b487c2a8a6 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -4,6 +4,7 @@
#include <rte_mbuf_dyn.h>
#include <rte_errno.h>
+#include <rte_alarm.h>
#include "idpf_common_rxtx.h"
@@ -349,6 +350,46 @@ idpf_qc_tx_queue_release(void *txq)
rte_free(q);
}
+#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND 10000
+static void
+idpf_dev_read_time_hw(void *cb_arg)
+{
+#ifdef RTE_ARCH_X86_64
+ struct idpf_adapter *ad = (struct idpf_adapter *)cb_arg;
+ uint32_t hi, lo, lo2;
+ int rc = 0;
+ struct idpf_hw *hw = &ad->hw;
+
+ IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+ IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0,
+ PF_GLTSYN_CMD_SYNC_EXEC_CMD_M | PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+ lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+ hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+ /*
+ * On typical system, the delta between lo and lo2 is ~1000ns,
+ * so 10000 seems a large-enough but not overly-big guard band.
+ */
+ if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
+ lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+ else
+ lo2 = lo;
+
+ if (lo2 < lo) {
+ lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+ hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+ }
+
+ ad->time_hw = ((uint64_t)hi << 32) | lo;
+#else /* !RTE_ARCH_X86_64 */
+ ad->time_hw = 0;
+#endif /* RTE_ARCH_X86_64 */
+
+ /* re-alarm watchdog */
+ rc = rte_eal_alarm_set(1000 * 1000, &idpf_dev_read_time_hw, cb_arg);
+ if (rc)
+ DRV_LOG(ERR, "Failed to reset device watchdog alarm");
+}
+
int
idpf_qc_ts_mbuf_register(struct idpf_rx_queue *rxq)
{
@@ -366,6 +407,24 @@ idpf_qc_ts_mbuf_register(struct idpf_rx_queue *rxq)
return 0;
}
+
+int
+idpf_rx_timestamp_start(struct idpf_adapter *base)
+{
+ rte_eal_alarm_set(1000 * 1000,
+ &idpf_dev_read_time_hw,
+ (void *)base);
+ return 0;
+}
+
+int
+idpf_rx_timestamp_stop(struct idpf_adapter *base)
+{
+ rte_eal_alarm_cancel(idpf_dev_read_time_hw,
+ base);
+ return 0;
+}
+
int
idpf_qc_single_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
{
@@ -442,56 +501,23 @@ idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
return 0;
}
-#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND 10000
/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
static inline uint64_t
-idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
- uint32_t in_timestamp)
+idpf_tstamp_convert_32b_64b(uint64_t time_hw, uint32_t in_timestamp)
{
-#ifdef RTE_ARCH_X86_64
- struct idpf_hw *hw = &ad->hw;
const uint64_t mask = 0xFFFFFFFF;
- uint32_t hi, lo, lo2, delta;
+ const uint32_t half_overflow_duration = 0x1 << 31;
+ uint32_t delta;
uint64_t ns;
- if (flag != 0) {
- IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
- IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
- PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
- lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
- hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
- /*
- * On typical system, the delta between lo and lo2 is ~1000ns,
- * so 10000 seems a large-enough but not overly-big guard band.
- */
- if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
- lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
- else
- lo2 = lo;
-
- if (lo2 < lo) {
- lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
- hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
- }
-
- ad->time_hw = ((uint64_t)hi << 32) | lo;
- }
-
- delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
- if (delta > (mask / 2)) {
- delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
- ns = ad->time_hw - delta;
+ delta = (in_timestamp - (uint32_t)(time_hw & mask));
+ if (delta > half_overflow_duration) {
+ delta = ((uint32_t)(time_hw & mask) - in_timestamp);
+ ns = time_hw - delta;
} else {
- ns = ad->time_hw + delta;
+ ns = time_hw + delta;
}
-
return ns;
-#else /* !RTE_ARCH_X86_64 */
- RTE_SET_USED(ad);
- RTE_SET_USED(flag);
- RTE_SET_USED(in_timestamp);
- return 0;
-#endif /* RTE_ARCH_X86_64 */
}
#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S \
@@ -659,9 +685,6 @@ idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_desc_ring = rxq->rx_ring;
ptype_tbl = rxq->adapter->ptype_tbl;
- if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
- rxq->hw_register_set = 1;
-
while (nb_rx < nb_pkts) {
rx_desc = &rx_desc_ring[rx_id];
@@ -720,10 +743,8 @@ idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (idpf_timestamp_dynflag > 0 &&
(rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP)) {
/* timestamp */
- ts_ns = idpf_tstamp_convert_32b_64b(ad,
- rxq->hw_register_set,
+ ts_ns = idpf_tstamp_convert_32b_64b(ad->time_hw,
rte_le_to_cpu_32(rx_desc->ts_high));
- rxq->hw_register_set = 0;
*RTE_MBUF_DYNFIELD(rxm,
idpf_timestamp_dynfield_offset,
rte_mbuf_timestamp_t *) = ts_ns;
@@ -1077,9 +1098,6 @@ idpf_dp_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->adapter->ptype_tbl;
- if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
- rxq->hw_register_set = 1;
-
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
rx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0);
@@ -1142,10 +1160,8 @@ idpf_dp_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (idpf_timestamp_dynflag > 0 &&
(rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {
/* timestamp */
- ts_ns = idpf_tstamp_convert_32b_64b(ad,
- rxq->hw_register_set,
+ ts_ns = idpf_tstamp_convert_32b_64b(ad->time_hw,
rte_le_to_cpu_32(rxd.flex_nic_wb.flex_ts.ts_high));
- rxq->hw_register_set = 0;
*RTE_MBUF_DYNFIELD(rxm,
idpf_timestamp_dynfield_offset,
rte_mbuf_timestamp_t *) = ts_ns;
@@ -1272,10 +1288,8 @@ idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (idpf_timestamp_dynflag > 0 &&
(rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0) {
/* timestamp */
- ts_ns = idpf_tstamp_convert_32b_64b(ad,
- rxq->hw_register_set,
+ ts_ns = idpf_tstamp_convert_32b_64b(ad->time_hw,
rte_le_to_cpu_32(rxd.flex_nic_wb.flex_ts.ts_high));
- rxq->hw_register_set = 0;
*RTE_MBUF_DYNFIELD(rxm,
idpf_timestamp_dynfield_offset,
rte_mbuf_timestamp_t *) = ts_ns;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 6cb83fc0a6..53049b1a31 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -145,7 +145,6 @@ struct idpf_rx_queue {
struct idpf_rx_queue *bufq2;
uint64_t offloads;
- uint32_t hw_register_set;
};
struct idpf_tx_entry {
@@ -303,4 +302,9 @@ __rte_internal
uint16_t idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+__rte_internal
+int idpf_rx_timestamp_start(struct idpf_adapter *base);
+
+__rte_internal
+int idpf_rx_timestamp_stop(struct idpf_adapter *base);
#endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..661c7f5cb9 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -34,6 +34,8 @@ INTERNAL {
idpf_qc_tx_thresh_check;
idpf_qc_tx_vec_avx512_setup;
idpf_qc_txq_mbufs_release;
+ idpf_rx_timestamp_start;
+ idpf_rx_timestamp_stop;
idpf_vc_api_version_check;
idpf_vc_caps_get;
--
2.25.1
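For reference, the helper kept above (idpf_tstamp_convert_32b_64b) extends
the 32-bit Rx descriptor timestamp against the cached 64-bit main time by
deciding whether the sample falls before or after the most recent 32-bit
wrap. A standalone sketch of the same arithmetic, with illustrative names
(extend_ts_32b_to_64b, ref_ns, ts32) rather than the driver's own:

    #include <stdint.h>

    /* Extend a 32-bit nanosecond timestamp to 64 bits using a recently
     * cached 64-bit reference (the main time saved by the alarm). If the
     * unsigned 32-bit delta exceeds half the 32-bit range, the sample is
     * taken to precede the reference; otherwise it follows it. */
    static inline uint64_t
    extend_ts_32b_to_64b(uint64_t ref_ns, uint32_t ts32)
    {
        uint32_t ref_lo = (uint32_t)ref_ns;
        uint32_t delta = ts32 - ref_lo;

        if (delta > (UINT32_C(1) << 31))
            return ref_ns - (uint32_t)(ref_lo - ts32);

        return ref_ns + delta;
    }

For example, with ref_ns = 0x100000100 and ts32 = 0xFFFFFF00 the result is
0xFFFFFF00, i.e. the sample is placed just before the latest wrap. The
resolution is only unambiguous while the reference is at most half a wrap
(about 2.1 s) old, which is why the per-second alarm matters.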
Thread overview: 35+ messages
2023-04-20 9:19 [PATCH 0/7] update idpf and cpfl timestamp Wenjing Qiao
2023-04-20 9:19 ` [PATCH 1/7] common/idpf: fix 64b timestamp roll over issue Wenjing Qiao
2023-04-21 7:15 ` [PATCH v2 0/7] update idpf and cpfl timestamp Wenjing Qiao
2023-04-21 7:15 ` [PATCH v2 1/7] common/idpf: fix 64b timestamp roll over issue Wenjing Qiao
2023-04-24 9:17 ` [PATCH v3 0/7] fix and enhance idpf and cpfl timestamp Wenjing Qiao
2023-04-24 9:17 ` [PATCH v3 1/7] common/idpf: fix 64b timestamp roll over issue Wenjing Qiao
2023-05-19 8:31 ` [PATCH v4 0/7] fix idpf and cpfl timestamp Wenjing Qiao
2023-05-19 8:31 ` Wenjing Qiao [this message]
2023-05-24 8:36 ` [PATCH v4 1/7] common/idpf: fix 64b timestamp roll over issue Liu, Mingxia
2023-05-19 8:31 ` [PATCH v4 2/7] net/idpf: save main time by alarm Wenjing Qiao
2023-05-19 8:31 ` [PATCH v4 3/7] net/cpfl: " Wenjing Qiao
2023-05-19 8:31 ` [PATCH v4 4/7] common/idpf: enhance timestamp offload feature for ACC Wenjing Qiao
2023-05-19 8:31 ` [PATCH v4 5/7] common/idpf: add timestamp enable flag for rxq Wenjing Qiao
2023-05-19 8:31 ` [PATCH v4 6/7] net/cpfl: adjust timestamp mbuf register Wenjing Qiao
2023-05-19 8:31 ` [PATCH v4 7/7] net/idpf: " Wenjing Qiao
2023-04-24 9:17 ` [PATCH v3 2/7] net/idpf: save main time by alarm Wenjing Qiao
2023-04-24 9:17 ` [PATCH v3 3/7] net/cpfl: " Wenjing Qiao
2023-04-24 9:17 ` [PATCH v3 4/7] common/idpf: enhance timestamp offload feature for ACC Wenjing Qiao
2023-04-24 9:17 ` [PATCH v3 5/7] common/idpf: add timestamp enable flag for rxq Wenjing Qiao
2023-04-24 9:17 ` [PATCH v3 6/7] net/cpfl: register timestamp mbuf when starting dev Wenjing Qiao
2023-04-28 3:24 ` Zhang, Qi Z
2023-04-24 9:17 ` [PATCH v3 7/7] net/idpf: " Wenjing Qiao
2023-04-21 7:15 ` [PATCH v2 2/7] net/idpf: save main time by alarm Wenjing Qiao
2023-04-28 2:46 ` Zhang, Qi Z
2023-04-21 7:15 ` [PATCH v2 3/7] net/cpfl: " Wenjing Qiao
2023-04-21 7:16 ` [PATCH v2 4/7] common/idpf: support timestamp offload feature for ACC Wenjing Qiao
2023-04-21 7:16 ` [PATCH v2 5/7] common/idpf: add timestamp enable flag for rxq Wenjing Qiao
2023-04-21 7:16 ` [PATCH v2 6/7] net/cpfl: register timestamp mbuf when starting dev Wenjing Qiao
2023-04-21 7:16 ` [PATCH v2 7/7] net/idpf: " Wenjing Qiao
2023-04-20 9:19 ` [PATCH 2/7] net/idpf: save master time by alarm Wenjing Qiao
2023-04-20 9:19 ` [PATCH 3/7] net/cpfl: " Wenjing Qiao
2023-04-20 9:19 ` [PATCH 4/7] common/idpf: support timestamp offload feature for ACC Wenjing Qiao
2023-04-20 9:19 ` [PATCH 5/7] common/idpf: add timestamp enable flag for rxq Wenjing Qiao
2023-04-20 9:19 ` [PATCH 6/7] net/cpfl: register timestamp mbuf when starting dev Wenjing Qiao
2023-04-20 9:19 ` [PATCH 7/7] net/idpf: " Wenjing Qiao