patches for DPDK stable branches
 help / color / mirror / Atom feed
From: "Su, Simei" <simei.su@intel.com>
To: "stable@dpdk.org" <stable@dpdk.org>
Subject: RE: [PATCH] net/ice: fix race condition for multi-cores
Date: Wed, 8 Jun 2022 02:31:15 +0000	[thread overview]
Message-ID: <DM6PR11MB32752D79BF2FAF0AA17B4A429CA49@DM6PR11MB3275.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20220608020923.233748-1-simei.su@intel.com>

Sorry — I sent the wrong mail; please ignore it.

> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, June 8, 2022 10:09 AM
> To: Wu, Wenjun1 <wenjun1.wu@intel.com>
> Cc: Su, Simei <simei.su@intel.com>; stable@dpdk.org
> Subject: [PATCH] net/ice: fix race condition for multi-cores
> 
> In multi-core cases for Rx timestamp offload, to avoid the PHC time being
> frequently overwritten, move the related variables from ice_adapter to
> the ice_rx_queue structure, so that each queue handles its timestamp
> calculation by itself.
> 
> Fixes: 953e74e6b73a ("net/ice: enable Rx timestamp on flex descriptor")
> Fixes: 5543827fc6df ("net/ice: improve performance of Rx timestamp offload")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
>  drivers/net/ice/ice_ethdev.h |  3 ---
>  drivers/net/ice/ice_rxtx.c   | 48 ++++++++++++++++++++++----------------------
>  drivers/net/ice/ice_rxtx.h   |  3 +++
>  3 files changed, 27 insertions(+), 27 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index
> f9f4a1c..c257bb2 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -606,9 +606,6 @@ struct ice_adapter {
>  	struct rte_timecounter tx_tstamp_tc;
>  	bool ptp_ena;
>  	uint64_t time_hw;
> -	uint32_t hw_time_high; /* high 32 bits of timestamp */
> -	uint32_t hw_time_low; /* low 32 bits of timestamp */
> -	uint64_t hw_time_update; /* SW time of HW record updating */
>  	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
>  	struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
>  	/* True if DCF state of the associated PF is on */ diff --git
> a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> 91cdc56..71e5c6f 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -1593,7 +1593,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
>  	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
>  		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz()
> / 1000);
> 
> -		if (unlikely(sw_cur_time - ad->hw_time_update > 4))
> +		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
>  			is_tsinit = 1;
>  	}
>  #endif
> @@ -1637,16 +1637,16 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
>  				if (unlikely(is_tsinit)) {
>  					ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
>  									   rxq->time_high);
> -					ad->hw_time_low = (uint32_t)ts_ns;
> -					ad->hw_time_high = (uint32_t)(ts_ns >> 32);
> +					rxq->hw_time_low = (uint32_t)ts_ns;
> +					rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
>  					is_tsinit = false;
>  				} else {
> -					if (rxq->time_high < ad->hw_time_low)
> -						ad->hw_time_high += 1;
> -					ts_ns = (uint64_t)ad->hw_time_high << 32 |
> rxq->time_high;
> -					ad->hw_time_low = rxq->time_high;
> +					if (rxq->time_high < rxq->hw_time_low)
> +						rxq->hw_time_high += 1;
> +					ts_ns = (uint64_t)rxq->hw_time_high << 32 |
> rxq->time_high;
> +					rxq->hw_time_low = rxq->time_high;
>  				}
> -				ad->hw_time_update = rte_get_timer_cycles() /
> +				rxq->hw_time_update = rte_get_timer_cycles() /
>  						     (rte_get_timer_hz() / 1000);
>  				*RTE_MBUF_DYNFIELD(mb,
>  						   ice_timestamp_dynfield_offset, @@ -1859,7
> +1859,7 @@ ice_recv_scattered_pkts(void *rx_queue,
>  	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
>  		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz()
> / 1000);
> 
> -		if (unlikely(sw_cur_time - ad->hw_time_update > 4))
> +		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
>  			is_tsinit = true;
>  	}
>  #endif
> @@ -1979,16 +1979,16 @@ ice_recv_scattered_pkts(void *rx_queue,
>  			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
>  			if (unlikely(is_tsinit)) {
>  				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
> rxq->time_high);
> -				ad->hw_time_low = (uint32_t)ts_ns;
> -				ad->hw_time_high = (uint32_t)(ts_ns >> 32);
> +				rxq->hw_time_low = (uint32_t)ts_ns;
> +				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
>  				is_tsinit = false;
>  			} else {
> -				if (rxq->time_high < ad->hw_time_low)
> -					ad->hw_time_high += 1;
> -				ts_ns = (uint64_t)ad->hw_time_high << 32 |
> rxq->time_high;
> -				ad->hw_time_low = rxq->time_high;
> +				if (rxq->time_high < rxq->hw_time_low)
> +					rxq->hw_time_high += 1;
> +				ts_ns = (uint64_t)rxq->hw_time_high << 32 |
> rxq->time_high;
> +				rxq->hw_time_low = rxq->time_high;
>  			}
> -			ad->hw_time_update = rte_get_timer_cycles() /
> +			rxq->hw_time_update = rte_get_timer_cycles() /
>  					     (rte_get_timer_hz() / 1000);
>  			*RTE_MBUF_DYNFIELD(rxm,
>  					   (ice_timestamp_dynfield_offset), @@ -2369,7
> +2369,7 @@ ice_recv_pkts(void *rx_queue,
>  	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
>  		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz()
> / 1000);
> 
> -		if (unlikely(sw_cur_time - ad->hw_time_update > 4))
> +		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
>  			is_tsinit = 1;
>  	}
>  #endif
> @@ -2430,16 +2430,16 @@ ice_recv_pkts(void *rx_queue,
>  			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
>  			if (unlikely(is_tsinit)) {
>  				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
> rxq->time_high);
> -				ad->hw_time_low = (uint32_t)ts_ns;
> -				ad->hw_time_high = (uint32_t)(ts_ns >> 32);
> +				rxq->hw_time_low = (uint32_t)ts_ns;
> +				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
>  				is_tsinit = false;
>  			} else {
> -				if (rxq->time_high < ad->hw_time_low)
> -					ad->hw_time_high += 1;
> -				ts_ns = (uint64_t)ad->hw_time_high << 32 |
> rxq->time_high;
> -				ad->hw_time_low = rxq->time_high;
> +				if (rxq->time_high < rxq->hw_time_low)
> +					rxq->hw_time_high += 1;
> +				ts_ns = (uint64_t)rxq->hw_time_high << 32 |
> rxq->time_high;
> +				rxq->hw_time_low = rxq->time_high;
>  			}
> -			ad->hw_time_update = rte_get_timer_cycles() /
> +			rxq->hw_time_update = rte_get_timer_cycles() /
>  					     (rte_get_timer_hz() / 1000);
>  			*RTE_MBUF_DYNFIELD(rxm,
>  					   (ice_timestamp_dynfield_offset), diff --git
> a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index
> bb18a01..f5337d5 100644
> --- a/drivers/net/ice/ice_rxtx.h
> +++ b/drivers/net/ice/ice_rxtx.h
> @@ -95,6 +95,9 @@ struct ice_rx_queue {
>  	uint32_t time_high;
>  	uint32_t hw_register_set;
>  	const struct rte_memzone *mz;
> +	uint32_t hw_time_high; /* high 32 bits of timestamp */
> +	uint32_t hw_time_low; /* low 32 bits of timestamp */
> +	uint64_t hw_time_update; /* SW time of HW record updating */
>  };
> 
>  struct ice_tx_entry {
> --
> 2.9.5


  reply	other threads:[~2022-06-08  2:31 UTC|newest]

Thread overview: 4+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-06-08  2:09 Simei Su
2022-06-08  2:31 ` Su, Simei [this message]
2022-06-08  2:46 Simei Su
2022-06-09  8:36 ` Zhang, Qi Z

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=DM6PR11MB32752D79BF2FAF0AA17B4A429CA49@DM6PR11MB3275.namprd11.prod.outlook.com \
    --to=simei.su@intel.com \
    --cc=stable@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).