* [PATCH v1] net/iavf: fix race condition for multi-cores
@ 2022-05-19 1:19 Wenjun Wu
2022-05-19 3:13 ` Stephen Hemminger
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: Wenjun Wu @ 2022-05-19 1:19 UTC (permalink / raw)
To: dev, jingjing.wu, beilei.xing, qi.z.zhang, simei.su; +Cc: Wenjun Wu
In multi-core cases with Rx timestamp offload, if packets arrive
too fast, the AdminQ command to get the PHC time can be left pending.
This patch adds a spinlock to fix this issue. To avoid the PHC time being
frequently overwritten, the related variables are moved to the iavf_rx_queue
structure, so that each queue handles timestamp calculation by itself.
Fixes: 2a0a62f40af5 ("net/iavf: enable Rx timestamp on Flex Descriptor")
Fixes: 33db16136e55 ("net/iavf: improve performance of Rx timestamp offload")
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
drivers/net/iavf/iavf.h | 5 ++-
drivers/net/iavf/iavf_ethdev.c | 9 ++----
drivers/net/iavf/iavf_rxtx.c | 56 ++++++++++++++++++++--------------
drivers/net/iavf/iavf_rxtx.h | 2 ++
drivers/net/iavf/iavf_vchnl.c | 17 ++++++-----
5 files changed, 49 insertions(+), 40 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index dd83567e59..379f25fbf2 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -270,6 +270,7 @@ struct iavf_info {
struct rte_eth_dev *eth_dev;
uint32_t ptp_caps;
+ rte_spinlock_t phc_time_aq_lock;
};
#define IAVF_MAX_PKT_TYPE 1024
@@ -314,8 +315,6 @@ struct iavf_adapter {
bool stopped;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
- uint64_t phc_time;
- uint64_t hw_time_update;
};
/* IAVF_DEV_PRIVATE_TO */
@@ -481,5 +480,5 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
uint8_t *resp_msg, size_t resp_msg_len);
extern const struct rte_tm_ops iavf_tm_ops;
int iavf_get_ptp_cap(struct iavf_adapter *adapter);
-int iavf_get_phc_time(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_rx_queue *rxq);
#endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 82672841f4..defb6eef14 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1020,13 +1020,8 @@ iavf_dev_start(struct rte_eth_dev *dev)
}
if (dev->data->dev_conf.rxmode.offloads &
- RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- if (iavf_get_phc_time(adapter)) {
- PMD_DRV_LOG(ERR, "get physical time failed");
- goto err_mac;
- }
- adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- }
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rte_spinlock_init(&vf->phc_time_aq_lock);
return 0;
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index d3b1a58b27..7c1ad6719d 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -886,6 +886,16 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
RTE_ETH_QUEUE_STATE_STARTED;
}
+ if (dev->data->dev_conf.rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ rte_spinlock_init(&vf->phc_time_aq_lock);
+ if (iavf_get_phc_time(rxq)) {
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ return err;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ }
+
return err;
}
@@ -1422,6 +1432,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
uint64_t dma_addr;
uint64_t pkt_flags;
const uint32_t *ptype_tbl;
+ uint64_t ts_ns;
nb_rx = 0;
nb_hold = 0;
@@ -1430,15 +1441,13 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
- uint64_t ts_ns;
-
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1505,11 +1514,11 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(rxm,
iavf_timestamp_dynfield_offset,
@@ -1545,7 +1554,6 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
@@ -1554,10 +1562,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1674,11 +1683,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(first_seg,
iavf_timestamp_dynfield_offset,
@@ -1881,7 +1890,6 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
@@ -1895,10 +1903,11 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1959,11 +1968,12 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(mb,
iavf_timestamp_dynfield_offset,
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 642b9a700a..1b89d16fc7 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -222,6 +222,8 @@ struct iavf_rx_queue {
/* flexible descriptor metadata extraction offload flag */
struct iavf_rx_queue_stats stats;
uint64_t offloads;
+ uint64_t phc_time;
+ uint64_t hw_time_update;
};
struct iavf_tx_entry {
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b654433135..36481c9a56 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1896,12 +1896,13 @@ iavf_get_ptp_cap(struct iavf_adapter *adapter)
}
int
-iavf_get_phc_time(struct iavf_adapter *adapter)
+iavf_get_phc_time(struct iavf_rx_queue *rxq)
{
+ struct iavf_adapter *adapter = rxq->vsi->adapter;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct virtchnl_phc_time phc_time;
struct iavf_cmd_info args;
- int err;
+ int err = 0;
args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
args.in_args = (uint8_t *)&phc_time;
@@ -1909,14 +1910,16 @@ iavf_get_phc_time(struct iavf_adapter *adapter)
args.out_buffer = vf->aq_resp;
args.out_size = IAVF_AQ_BUF_SZ;
+ rte_spinlock_lock(&vf->phc_time_aq_lock);
err = iavf_execute_vf_cmd(adapter, &args, 0);
if (err) {
PMD_DRV_LOG(ERR,
- "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
- return err;
+ "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
+ goto out;
}
+ rxq->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
- adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
-
- return 0;
+out:
+ rte_spinlock_unlock(&vf->phc_time_aq_lock);
+ return err;
}
--
2.25.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH v1] net/iavf: fix race condition for multi-cores
2022-05-19 1:19 [PATCH v1] net/iavf: fix race condition for multi-cores Wenjun Wu
@ 2022-05-19 3:13 ` Stephen Hemminger
2022-05-19 7:33 ` [PATCH v2] " Wenjun Wu
` (2 subsequent siblings)
3 siblings, 0 replies; 7+ messages in thread
From: Stephen Hemminger @ 2022-05-19 3:13 UTC (permalink / raw)
To: Wenjun Wu; +Cc: dev, jingjing.wu, beilei.xing, qi.z.zhang, simei.su
On Thu, 19 May 2022 09:19:59 +0800
Wenjun Wu <wenjun1.wu@intel.com> wrote:
> if (dev->data->dev_conf.rxmode.offloads &
> - RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
> - if (iavf_get_phc_time(adapter)) {
> - PMD_DRV_LOG(ERR, "get physical time failed");
> - goto err_mac;
> - }
> - adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
> - }
> + RTE_ETH_RX_OFFLOAD_TIMESTAMP)
> + rte_spinlock_init(&vf->phc_time_aq_lock);
To be safe, just always init the spinlock. It is just a simple assignment.
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v2] net/iavf: fix race condition for multi-cores
2022-05-19 1:19 [PATCH v1] net/iavf: fix race condition for multi-cores Wenjun Wu
2022-05-19 3:13 ` Stephen Hemminger
@ 2022-05-19 7:33 ` Wenjun Wu
2022-05-19 15:27 ` Stephen Hemminger
2022-05-20 7:35 ` [PATCH v3] " Wenjun Wu
2022-05-23 4:49 ` [PATCH v4] " Wenjun Wu
3 siblings, 1 reply; 7+ messages in thread
From: Wenjun Wu @ 2022-05-19 7:33 UTC (permalink / raw)
To: dev, jingjing.wu, beilei.xing, qi.z.zhang, simei.su; +Cc: Wenjun Wu
In multi-core cases with Rx timestamp offload, if packets arrive
too fast, the AdminQ command to get the PHC time can be left pending.
This patch adds a spinlock to fix this issue. To avoid the PHC time being
frequently overwritten, the related variables are moved to the iavf_rx_queue
structure, so that each queue handles timestamp calculation by itself.
Fixes: 2a0a62f40af5 ("net/iavf: enable Rx timestamp on Flex Descriptor")
Fixes: 33db16136e55 ("net/iavf: improve performance of Rx timestamp offload")
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
v2: move duplicated spinlock initialization.
---
drivers/net/iavf/iavf.h | 5 ++--
drivers/net/iavf/iavf_ethdev.c | 13 +++-----
drivers/net/iavf/iavf_rxtx.c | 55 ++++++++++++++++++++--------------
drivers/net/iavf/iavf_rxtx.h | 2 ++
drivers/net/iavf/iavf_vchnl.c | 17 ++++++-----
5 files changed, 50 insertions(+), 42 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index dd83567e59..379f25fbf2 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -270,6 +270,7 @@ struct iavf_info {
struct rte_eth_dev *eth_dev;
uint32_t ptp_caps;
+ rte_spinlock_t phc_time_aq_lock;
};
#define IAVF_MAX_PKT_TYPE 1024
@@ -314,8 +315,6 @@ struct iavf_adapter {
bool stopped;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
- uint64_t phc_time;
- uint64_t hw_time_update;
};
/* IAVF_DEV_PRIVATE_TO */
@@ -481,5 +480,5 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
uint8_t *resp_msg, size_t resp_msg_len);
extern const struct rte_tm_ops iavf_tm_ops;
int iavf_get_ptp_cap(struct iavf_adapter *adapter);
-int iavf_get_phc_time(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_rx_queue *rxq);
#endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 82672841f4..1b8d9b86c8 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1014,20 +1014,15 @@ iavf_dev_start(struct rte_eth_dev *dev)
iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
true);
+ if (dev->data->dev_conf.rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rte_spinlock_init(&vf->phc_time_aq_lock);
+
if (iavf_start_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "enable queues failed");
goto err_mac;
}
- if (dev->data->dev_conf.rxmode.offloads &
- RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- if (iavf_get_phc_time(adapter)) {
- PMD_DRV_LOG(ERR, "get physical time failed");
- goto err_mac;
- }
- adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- }
-
return 0;
err_mac:
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index d3b1a58b27..b61da94f07 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -886,6 +886,15 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
RTE_ETH_QUEUE_STATE_STARTED;
}
+ if (dev->data->dev_conf.rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ if (iavf_get_phc_time(rxq)) {
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ return err;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ }
+
return err;
}
@@ -1422,6 +1431,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
uint64_t dma_addr;
uint64_t pkt_flags;
const uint32_t *ptype_tbl;
+ uint64_t ts_ns;
nb_rx = 0;
nb_hold = 0;
@@ -1430,15 +1440,13 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
- uint64_t ts_ns;
-
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1505,11 +1513,11 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(rxm,
iavf_timestamp_dynfield_offset,
@@ -1545,7 +1553,6 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
@@ -1554,10 +1561,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1674,11 +1682,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(first_seg,
iavf_timestamp_dynfield_offset,
@@ -1881,7 +1889,6 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
@@ -1895,10 +1902,11 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1959,11 +1967,12 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(mb,
iavf_timestamp_dynfield_offset,
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 642b9a700a..1b89d16fc7 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -222,6 +222,8 @@ struct iavf_rx_queue {
/* flexible descriptor metadata extraction offload flag */
struct iavf_rx_queue_stats stats;
uint64_t offloads;
+ uint64_t phc_time;
+ uint64_t hw_time_update;
};
struct iavf_tx_entry {
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b654433135..36481c9a56 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1896,12 +1896,13 @@ iavf_get_ptp_cap(struct iavf_adapter *adapter)
}
int
-iavf_get_phc_time(struct iavf_adapter *adapter)
+iavf_get_phc_time(struct iavf_rx_queue *rxq)
{
+ struct iavf_adapter *adapter = rxq->vsi->adapter;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct virtchnl_phc_time phc_time;
struct iavf_cmd_info args;
- int err;
+ int err = 0;
args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
args.in_args = (uint8_t *)&phc_time;
@@ -1909,14 +1910,16 @@ iavf_get_phc_time(struct iavf_adapter *adapter)
args.out_buffer = vf->aq_resp;
args.out_size = IAVF_AQ_BUF_SZ;
+ rte_spinlock_lock(&vf->phc_time_aq_lock);
err = iavf_execute_vf_cmd(adapter, &args, 0);
if (err) {
PMD_DRV_LOG(ERR,
- "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
- return err;
+ "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
+ goto out;
}
+ rxq->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
- adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
-
- return 0;
+out:
+ rte_spinlock_unlock(&vf->phc_time_aq_lock);
+ return err;
}
--
2.25.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH v2] net/iavf: fix race condition for multi-cores
2022-05-19 7:33 ` [PATCH v2] " Wenjun Wu
@ 2022-05-19 15:27 ` Stephen Hemminger
0 siblings, 0 replies; 7+ messages in thread
From: Stephen Hemminger @ 2022-05-19 15:27 UTC (permalink / raw)
To: Wenjun Wu; +Cc: dev, jingjing.wu, beilei.xing, qi.z.zhang, simei.su
On Thu, 19 May 2022 15:33:34 +0800
Wenjun Wu <wenjun1.wu@intel.com> wrote:
>
> + if (dev->data->dev_conf.rxmode.offloads &
> + RTE_ETH_RX_OFFLOAD_TIMESTAMP)
> + rte_spinlock_init(&vf->phc_time_aq_lock);
> +
There is no reason this init should only happen when certain flags are set.
Just always do it.
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v3] net/iavf: fix race condition for multi-cores
2022-05-19 1:19 [PATCH v1] net/iavf: fix race condition for multi-cores Wenjun Wu
2022-05-19 3:13 ` Stephen Hemminger
2022-05-19 7:33 ` [PATCH v2] " Wenjun Wu
@ 2022-05-20 7:35 ` Wenjun Wu
2022-05-23 4:49 ` [PATCH v4] " Wenjun Wu
3 siblings, 0 replies; 7+ messages in thread
From: Wenjun Wu @ 2022-05-20 7:35 UTC (permalink / raw)
To: dev, jingjing.wu, beilei.xing, qi.z.zhang, simei.su; +Cc: stephen, Wenjun Wu
In multi-core cases with Rx timestamp offload, if packets arrive
too fast, the AdminQ command to get the PHC time can be left pending.
This patch adds a spinlock to fix this issue. To avoid the PHC time being
frequently overwritten, the related variables are moved to the iavf_rx_queue
structure, so that each queue handles timestamp calculation by itself.
Fixes: 2a0a62f40af5 ("net/iavf: enable Rx timestamp on Flex Descriptor")
Fixes: 33db16136e55 ("net/iavf: improve performance of Rx timestamp offload")
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
v3: remove restrictions on initialization.
v2: move duplicated spinlock initialization.
---
drivers/net/iavf/iavf.h | 5 ++--
drivers/net/iavf/iavf_ethdev.c | 11 ++-----
drivers/net/iavf/iavf_rxtx.c | 55 ++++++++++++++++++++--------------
drivers/net/iavf/iavf_rxtx.h | 2 ++
drivers/net/iavf/iavf_vchnl.c | 15 ++++++----
5 files changed, 47 insertions(+), 41 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index dd83567e59..379f25fbf2 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -270,6 +270,7 @@ struct iavf_info {
struct rte_eth_dev *eth_dev;
uint32_t ptp_caps;
+ rte_spinlock_t phc_time_aq_lock;
};
#define IAVF_MAX_PKT_TYPE 1024
@@ -314,8 +315,6 @@ struct iavf_adapter {
bool stopped;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
- uint64_t phc_time;
- uint64_t hw_time_update;
};
/* IAVF_DEV_PRIVATE_TO */
@@ -481,5 +480,5 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
uint8_t *resp_msg, size_t resp_msg_len);
extern const struct rte_tm_ops iavf_tm_ops;
int iavf_get_ptp_cap(struct iavf_adapter *adapter);
-int iavf_get_phc_time(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_rx_queue *rxq);
#endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 82672841f4..30a0cc1a02 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1014,20 +1014,13 @@ iavf_dev_start(struct rte_eth_dev *dev)
iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
true);
+ rte_spinlock_init(&vf->phc_time_aq_lock);
+
if (iavf_start_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "enable queues failed");
goto err_mac;
}
- if (dev->data->dev_conf.rxmode.offloads &
- RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- if (iavf_get_phc_time(adapter)) {
- PMD_DRV_LOG(ERR, "get physical time failed");
- goto err_mac;
- }
- adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- }
-
return 0;
err_mac:
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index d3b1a58b27..b61da94f07 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -886,6 +886,15 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
RTE_ETH_QUEUE_STATE_STARTED;
}
+ if (dev->data->dev_conf.rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ if (iavf_get_phc_time(rxq)) {
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ return err;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ }
+
return err;
}
@@ -1422,6 +1431,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
uint64_t dma_addr;
uint64_t pkt_flags;
const uint32_t *ptype_tbl;
+ uint64_t ts_ns;
nb_rx = 0;
nb_hold = 0;
@@ -1430,15 +1440,13 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
- uint64_t ts_ns;
-
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1505,11 +1513,11 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(rxm,
iavf_timestamp_dynfield_offset,
@@ -1545,7 +1553,6 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
@@ -1554,10 +1561,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1674,11 +1682,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(first_seg,
iavf_timestamp_dynfield_offset,
@@ -1881,7 +1889,6 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
@@ -1895,10 +1902,11 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1959,11 +1967,12 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(mb,
iavf_timestamp_dynfield_offset,
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 642b9a700a..1b89d16fc7 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -222,6 +222,8 @@ struct iavf_rx_queue {
/* flexible descriptor metadata extraction offload flag */
struct iavf_rx_queue_stats stats;
uint64_t offloads;
+ uint64_t phc_time;
+ uint64_t hw_time_update;
};
struct iavf_tx_entry {
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b654433135..2d2647d42c 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1896,12 +1896,13 @@ iavf_get_ptp_cap(struct iavf_adapter *adapter)
}
int
-iavf_get_phc_time(struct iavf_adapter *adapter)
+iavf_get_phc_time(struct iavf_rx_queue *rxq)
{
+ struct iavf_adapter *adapter = rxq->vsi->adapter;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct virtchnl_phc_time phc_time;
struct iavf_cmd_info args;
- int err;
+ int err = 0;
args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
args.in_args = (uint8_t *)&phc_time;
@@ -1909,14 +1910,16 @@ iavf_get_phc_time(struct iavf_adapter *adapter)
args.out_buffer = vf->aq_resp;
args.out_size = IAVF_AQ_BUF_SZ;
+ rte_spinlock_lock(&vf->phc_time_aq_lock);
err = iavf_execute_vf_cmd(adapter, &args, 0);
if (err) {
PMD_DRV_LOG(ERR,
"Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
- return err;
+ goto out;
}
+ rxq->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
- adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
-
- return 0;
+out:
+ rte_spinlock_unlock(&vf->phc_time_aq_lock);
+ return err;
}
--
2.25.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v4] net/iavf: fix race condition for multi-cores
2022-05-19 1:19 [PATCH v1] net/iavf: fix race condition for multi-cores Wenjun Wu
` (2 preceding siblings ...)
2022-05-20 7:35 ` [PATCH v3] " Wenjun Wu
@ 2022-05-23 4:49 ` Wenjun Wu
2022-05-23 5:17 ` Zhang, Qi Z
3 siblings, 1 reply; 7+ messages in thread
From: Wenjun Wu @ 2022-05-23 4:49 UTC (permalink / raw)
To: dev, jingjing.wu, beilei.xing, qi.z.zhang, simei.su; +Cc: stephen, Wenjun Wu
In multi-core cases with Rx timestamp offload, if packets arrive
too fast, the AdminQ command to get the PHC time can be left pending.
This patch adds a spinlock to fix this issue. To avoid the PHC time being
frequently overwritten, the related variables are moved to the iavf_rx_queue
structure, so that each queue handles timestamp calculation by itself.
Fixes: 2a0a62f40af5 ("net/iavf: enable Rx timestamp on Flex Descriptor")
Fixes: 33db16136e55 ("net/iavf: improve performance of Rx timestamp offload")
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
v4: rebase to the latest commit.
v3: remove restrictions on initialization.
v2: move duplicated spinlock initialization.
---
drivers/net/iavf/iavf.h | 5 ++--
drivers/net/iavf/iavf_ethdev.c | 11 ++-----
drivers/net/iavf/iavf_rxtx.c | 55 ++++++++++++++++++++--------------
drivers/net/iavf/iavf_rxtx.h | 2 ++
drivers/net/iavf/iavf_vchnl.c | 15 ++++++----
5 files changed, 47 insertions(+), 41 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 819510649a..f1c2daa06e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -270,6 +270,7 @@ struct iavf_info {
struct rte_eth_dev *eth_dev;
uint32_t ptp_caps;
+ rte_spinlock_t phc_time_aq_lock;
};
#define IAVF_MAX_PKT_TYPE 1024
@@ -315,8 +316,6 @@ struct iavf_adapter {
bool closed;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
- uint64_t phc_time;
- uint64_t hw_time_update;
};
/* IAVF_DEV_PRIVATE_TO */
@@ -482,5 +481,5 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
uint8_t *resp_msg, size_t resp_msg_len);
extern const struct rte_tm_ops iavf_tm_ops;
int iavf_get_ptp_cap(struct iavf_adapter *adapter);
-int iavf_get_phc_time(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_rx_queue *rxq);
#endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index a74056f0f1..7a3b37d5e3 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1039,20 +1039,13 @@ iavf_dev_start(struct rte_eth_dev *dev)
iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
true);
+ rte_spinlock_init(&vf->phc_time_aq_lock);
+
if (iavf_start_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "enable queues failed");
goto err_mac;
}
- if (dev->data->dev_conf.rxmode.offloads &
- RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
- if (iavf_get_phc_time(adapter)) {
- PMD_DRV_LOG(ERR, "get physical time failed");
- goto err_mac;
- }
- adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- }
-
return 0;
err_mac:
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index ff0c98ffc3..bb3d7ea434 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -904,6 +904,15 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
RTE_ETH_QUEUE_STATE_STARTED;
}
+ if (dev->data->dev_conf.rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ if (iavf_get_phc_time(rxq)) {
+ PMD_DRV_LOG(ERR, "get physical time failed");
+ return err;
+ }
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ }
+
return err;
}
@@ -1440,6 +1449,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
uint64_t dma_addr;
uint64_t pkt_flags;
const uint32_t *ptype_tbl;
+ uint64_t ts_ns;
nb_rx = 0;
nb_hold = 0;
@@ -1448,15 +1458,13 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
- uint64_t ts_ns;
-
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1523,11 +1531,11 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(rxm,
iavf_timestamp_dynfield_offset,
@@ -1563,7 +1571,6 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
@@ -1572,10 +1579,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1692,11 +1700,11 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(first_seg,
iavf_timestamp_dynfield_offset,
@@ -1899,7 +1907,6 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
int32_t nb_staged = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct iavf_adapter *ad = rxq->vsi->adapter;
uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
@@ -1913,10 +1920,11 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (sw_cur_time - ad->hw_time_update > 4) {
- if (iavf_get_phc_time(ad))
+
+ if (sw_cur_time - rxq->hw_time_update > 4) {
+ if (iavf_get_phc_time(rxq))
PMD_DRV_LOG(ERR, "get physical time failed");
- ad->hw_time_update = sw_cur_time;
+ rxq->hw_time_update = sw_cur_time;
}
}
@@ -1977,11 +1985,12 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
if (iavf_timestamp_dynflag > 0) {
- ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ ts_ns = iavf_tstamp_convert_32b_64b(rxq->phc_time,
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
- ad->phc_time = ts_ns;
- ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+ rxq->phc_time = ts_ns;
+ rxq->hw_time_update = rte_get_timer_cycles() /
+ (rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(mb,
iavf_timestamp_dynfield_offset,
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e8362bbd1d..1695e43cd5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -223,6 +223,8 @@ struct iavf_rx_queue {
/* flexible descriptor metadata extraction offload flag */
struct iavf_rx_queue_stats stats;
uint64_t offloads;
+ uint64_t phc_time;
+ uint64_t hw_time_update;
};
struct iavf_tx_entry {
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0520b1045f..21bd1e2193 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1913,12 +1913,13 @@ iavf_get_ptp_cap(struct iavf_adapter *adapter)
}
int
-iavf_get_phc_time(struct iavf_adapter *adapter)
+iavf_get_phc_time(struct iavf_rx_queue *rxq)
{
+ struct iavf_adapter *adapter = rxq->vsi->adapter;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct virtchnl_phc_time phc_time;
struct iavf_cmd_info args;
- int err;
+ int err = 0;
args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
args.in_args = (uint8_t *)&phc_time;
@@ -1926,14 +1927,16 @@ iavf_get_phc_time(struct iavf_adapter *adapter)
args.out_buffer = vf->aq_resp;
args.out_size = IAVF_AQ_BUF_SZ;
+ rte_spinlock_lock(&vf->phc_time_aq_lock);
err = iavf_execute_vf_cmd(adapter, &args, 0);
if (err) {
PMD_DRV_LOG(ERR,
"Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
- return err;
+ goto out;
}
+ rxq->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
- adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
-
- return 0;
+out:
+ rte_spinlock_unlock(&vf->phc_time_aq_lock);
+ return err;
}
--
2.25.1
^ permalink raw reply [flat|nested] 7+ messages in thread
* RE: [PATCH v4] net/iavf: fix race condition for multi-cores
2022-05-23 4:49 ` [PATCH v4] " Wenjun Wu
@ 2022-05-23 5:17 ` Zhang, Qi Z
0 siblings, 0 replies; 7+ messages in thread
From: Zhang, Qi Z @ 2022-05-23 5:17 UTC (permalink / raw)
To: Wu, Wenjun1, dev, Wu, Jingjing, Xing, Beilei, Su, Simei; +Cc: stephen
> -----Original Message-----
> From: Wu, Wenjun1 <wenjun1.wu@intel.com>
> Sent: Monday, May 23, 2022 12:49 PM
> To: dev@dpdk.org; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Su, Simei
> <simei.su@intel.com>
> Cc: stephen@networkplumber.org; Wu, Wenjun1 <wenjun1.wu@intel.com>
> Subject: [PATCH v4] net/iavf: fix race condition for multi-cores
>
> In multi-core cases for RX timestamp offload, if packets arrive too fast, the
> AQ command to get the PHC time will be left pending.
>
> This patch adds a spinlock to fix this issue. To avoid the PHC time being
> frequently overwritten, the related variables are moved into the iavf_rx_queue
> structure, so that each queue handles its own timestamp calculation.
>
> Fixes: 2a0a62f40af5 ("net/iavf: enable Rx timestamp on Flex Descriptor")
> Fixes: 33db16136e55 ("net/iavf: improve performance of Rx timestamp
> offload")
>
> Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2022-05-23 5:17 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-05-19 1:19 [PATCH v1] net/iavf: fix race condition for multi-cores Wenjun Wu
2022-05-19 3:13 ` Stephen Hemminger
2022-05-19 7:33 ` [PATCH v2] " Wenjun Wu
2022-05-19 15:27 ` Stephen Hemminger
2022-05-20 7:35 ` [PATCH v3] " Wenjun Wu
2022-05-23 4:49 ` [PATCH v4] " Wenjun Wu
2022-05-23 5:17 ` Zhang, Qi Z
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).