DPDK patches and discussions
* [PATCH 0/2] bugfix and replace on use of stdatomic API
@ 2023-12-11  7:39 Jie Hai
  2023-12-11  7:39 ` [PATCH 1/2] eal: fix constraints on " Jie Hai
  2023-12-11  7:39 ` [PATCH 2/2] net/hns3: use " Jie Hai
  0 siblings, 2 replies; 8+ messages in thread
From: Jie Hai @ 2023-12-11  7:39 UTC
  To: dev; +Cc: lihuisong, fengchengwen, liudongdong3, haijie1

This patchset fixes a bug in the stdatomic API usage in lib and test,
and replaces all __atomic_xxx builtins with rte_atomic_xxx in the hns3
driver.

Jie Hai (2):
  eal: fix constraints on stdatomic API
  net/hns3: use stdatomic API

 app/test/test_atomic.c               |  6 +--
 drivers/net/hns3/hns3_cmd.c          | 24 ++++++----
 drivers/net/hns3/hns3_dcb.c          |  3 +-
 drivers/net/hns3/hns3_ethdev.c       | 52 +++++++++++++--------
 drivers/net/hns3/hns3_ethdev.h       | 36 ++++++++-------
 drivers/net/hns3/hns3_ethdev_vf.c    | 68 +++++++++++++++-------------
 drivers/net/hns3/hns3_intr.c         | 47 ++++++++++---------
 drivers/net/hns3/hns3_intr.h         |  4 +-
 drivers/net/hns3/hns3_mbx.c          |  9 ++--
 drivers/net/hns3/hns3_mp.c           |  9 ++--
 drivers/net/hns3/hns3_rxtx.c         | 15 ++++--
 drivers/net/hns3/hns3_tm.c           |  6 ++-
 lib/eal/include/generic/rte_atomic.h | 12 ++---
 13 files changed, 169 insertions(+), 122 deletions(-)

-- 
2.30.0



* [PATCH 1/2] eal: fix constraints on stdatomic API
  2023-12-11  7:39 [PATCH 0/2] bugfix and replace on use of stdatomic API Jie Hai
@ 2023-12-11  7:39 ` Jie Hai
  2023-12-11 18:53   ` Tyler Retzlaff
  2023-12-11  7:39 ` [PATCH 2/2] net/hns3: use " Jie Hai
  1 sibling, 1 reply; 8+ messages in thread
From: Jie Hai @ 2023-12-11  7:39 UTC
  To: dev, Morten Brørup, Tyler Retzlaff, Konstantin Ananyev
  Cc: lihuisong, fengchengwen, liudongdong3, haijie1

The first parameter of rte_atomic_exchange_explicit() must be a
pointer to an _Atomic type. Running "meson setup --werror
-Denable_stdatomic=true build && ninja -C build" therefore fails
with a compile error. This patch fixes it.
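For context, a minimal sketch (not part of the patch) of the constraint
being fixed: the object itself must carry the RTE_ATOMIC() qualifier so
that, when enable_stdatomic=true expands it to _Atomic, the pointer
passed in has the type the API requires.

	#include <rte_stdatomic.h>

	/* RTE_ATOMIC(uint16_t) expands to _Atomic(uint16_t) when
	 * RTE_ENABLE_STDATOMIC is defined, and to plain uint16_t
	 * otherwise. */
	static RTE_ATOMIC(uint16_t) counter;

	static uint16_t
	swap_counter(uint16_t val)
	{
		/* OK: &counter is a pointer to an _Atomic object. */
		return rte_atomic_exchange_explicit(&counter, val,
						    rte_memory_order_seq_cst);
	}

	/* Passing a plain (or volatile) uint16_t * here instead is what
	 * breaks the -Denable_stdatomic=true build. */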

Fixes: 1ec6a845b5cb ("eal: use stdatomic API in public headers")
Cc: stable@dpdk.org

Signed-off-by: Jie Hai <haijie1@huawei.com>
---
 app/test/test_atomic.c               |  6 +++---
 lib/eal/include/generic/rte_atomic.h | 12 ++++++------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
index db07159e81ab..c3cb3ae0ea57 100644
--- a/app/test/test_atomic.c
+++ b/app/test/test_atomic.c
@@ -347,9 +347,9 @@ typedef union {
 const uint8_t CRC8_POLY = 0x91;
 uint8_t crc8_table[256];
 
-volatile uint16_t token16;
-volatile uint32_t token32;
-volatile uint64_t token64;
+volatile RTE_ATOMIC(uint16_t) token16;
+volatile RTE_ATOMIC(uint32_t) token32;
+volatile RTE_ATOMIC(uint64_t) token64;
 
 static void
 build_crc8_table(void)
diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
index 0e639dad76a4..38c3b41f9c68 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -207,11 +207,11 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
  *   The original value at that location
  */
 static inline uint16_t
-rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
+rte_atomic16_exchange(volatile RTE_ATOMIC(uint16_t) *dst, uint16_t val);
 
 #ifdef RTE_FORCE_INTRINSICS
 static inline uint16_t
-rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+rte_atomic16_exchange(volatile RTE_ATOMIC(uint16_t) *dst, uint16_t val)
 {
 	return rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst);
 }
@@ -492,11 +492,11 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
  *   The original value at that location
  */
 static inline uint32_t
-rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val);
+rte_atomic32_exchange(volatile RTE_ATOMIC(uint32_t) *dst, uint32_t val);
 
 #ifdef RTE_FORCE_INTRINSICS
 static inline uint32_t
-rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+rte_atomic32_exchange(volatile RTE_ATOMIC(uint32_t) *dst, uint32_t val)
 {
 	return rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst);
 }
@@ -776,11 +776,11 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
  *   The original value at that location
  */
 static inline uint64_t
-rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val);
+rte_atomic64_exchange(volatile RTE_ATOMIC(uint64_t) *dst, uint64_t val);
 
 #ifdef RTE_FORCE_INTRINSICS
 static inline uint64_t
-rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+rte_atomic64_exchange(volatile RTE_ATOMIC(uint64_t) *dst, uint64_t val)
 {
 	return rte_atomic_exchange_explicit(dst, val, rte_memory_order_seq_cst);
 }
-- 
2.30.0



* [PATCH 2/2] net/hns3: use stdatomic API
  2023-12-11  7:39 [PATCH 0/2] bugfix and replace on use of stdatomic API Jie Hai
  2023-12-11  7:39 ` [PATCH 1/2] eal: fix constraints on " Jie Hai
@ 2023-12-11  7:39 ` Jie Hai
  1 sibling, 0 replies; 8+ messages in thread
From: Jie Hai @ 2023-12-11  7:39 UTC
  To: dev, Yisen Zhuang; +Cc: lihuisong, fengchengwen, liudongdong3, haijie1

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.
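For context, a sketch of the conversion pattern applied throughout
(hypothetical names, not code taken from the driver):

	#include <rte_stdatomic.h>

	/* Before: gcc builtin operating on a plain integer. */
	static uint16_t old_flag;

	static int
	old_is_set(void)
	{
		return __atomic_load_n(&old_flag, __ATOMIC_RELAXED) != 0;
	}

	/* After: the object gains an RTE_ATOMIC() qualifier and the
	 * builtin becomes the rte_atomic_*_explicit() wrapper taking
	 * an rte_memory_order_* argument. */
	static RTE_ATOMIC(uint16_t) new_flag;

	static int
	new_is_set(void)
	{
		return rte_atomic_load_explicit(&new_flag,
						rte_memory_order_relaxed) != 0;
	}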

Signed-off-by: Jie Hai <haijie1@huawei.com>
---
 drivers/net/hns3/hns3_cmd.c       | 24 +++++++----
 drivers/net/hns3/hns3_dcb.c       |  3 +-
 drivers/net/hns3/hns3_ethdev.c    | 52 ++++++++++++++---------
 drivers/net/hns3/hns3_ethdev.h    | 36 ++++++++--------
 drivers/net/hns3/hns3_ethdev_vf.c | 68 +++++++++++++++++--------------
 drivers/net/hns3/hns3_intr.c      | 47 +++++++++++----------
 drivers/net/hns3/hns3_intr.h      |  4 +-
 drivers/net/hns3/hns3_mbx.c       |  9 ++--
 drivers/net/hns3/hns3_mp.c        |  9 ++--
 drivers/net/hns3/hns3_rxtx.c      | 15 ++++---
 drivers/net/hns3/hns3_tm.c        |  6 ++-
 11 files changed, 160 insertions(+), 113 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 2c1664485bef..49cb2cc3dacf 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,13 @@ static int
 hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
 		      uint64_t size, uint32_t alignment)
 {
-	static uint64_t hns3_dma_memzone_id;
+	static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
 	const struct rte_memzone *mz = NULL;
 	char z_name[RTE_MEMZONE_NAMESIZE];
 
 	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
-		__atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+		rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1,
+					      rte_memory_order_relaxed));
 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
 					 RTE_MEMZONE_IOVA_CONTIG, alignment,
 					 RTE_PGSIZE_2M);
@@ -198,8 +199,8 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
 		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
 			 csq->next_to_use, csq->next_to_clean);
 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			__atomic_store_n(&hw->reset.disable_cmd, 1,
-					 __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+						  rte_memory_order_relaxed);
 			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
 		}
 
@@ -313,7 +314,8 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 		if (hns3_cmd_csq_done(hw))
 			return 0;
 
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			hns3_err(hw,
 				 "Don't wait for reply because of disable_cmd");
 			return -EBUSY;
@@ -360,7 +362,8 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
 	int retval;
 	uint32_t ntc;
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -745,7 +748,8 @@ hns3_cmd_init(struct hns3_hw *hw)
 		ret = -EBUSY;
 		goto err_cmd_init;
 	}
-	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0,
+				  rte_memory_order_relaxed);
 
 	ret = hns3_cmd_query_firmware_version_and_capability(hw);
 	if (ret) {
@@ -788,7 +792,8 @@ hns3_cmd_init(struct hns3_hw *hw)
 	return 0;
 
 err_cmd_init:
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	return ret;
 }
 
@@ -817,7 +822,8 @@ hns3_cmd_uninit(struct hns3_hw *hw)
 	if (!hns->is_vf)
 		(void)hns3_firmware_compat_config(hw, false);
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 
 	/*
 	 * A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 2831d3dc6205..08c77e04857d 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,8 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
 	 * and configured directly to the hardware in the RESET_STAGE_RESTORE
 	 * stage of the reset process.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		for (i = 0; i < hw->rss_ind_tbl_size; i++)
 			rss_cfg->rss_indirection_tbl[i] =
 							i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index eafcf2c6f644..ef0ff778d483 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = {
 };
 
 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
-						 uint64_t *levels);
+						 RTE_ATOMIC(uint64_t) *levels);
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
 				    int on);
@@ -134,7 +134,8 @@ hns3_proc_imp_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
 {
 	struct hns3_hw *hw = &hns->hw;
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
 	hw->reset.stats.imp_cnt++;
@@ -148,7 +149,8 @@ hns3_proc_global_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
 {
 	struct hns3_hw *hw = &hns->hw;
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
 	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
 	hw->reset.stats.global_cnt++;
@@ -1151,7 +1153,8 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * ensure that the hardware configuration remains unchanged before and
 	 * after reset.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
 		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
 	}
@@ -1175,7 +1178,8 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * we will restore configurations to hardware in hns3_restore_vlan_table
 	 * and hns3_restore_vlan_conf later.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
 		if (ret) {
 			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5063,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5155,8 @@ hns3_do_stop(struct hns3_adapter *hns)
 	 * during reset and is required to be released after the reset is
 	 * completed.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0)
 		hns3_dev_release_mbufs(hns);
 
 	ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5164,8 @@ hns3_do_stop(struct hns3_adapter *hns)
 		return ret;
 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -5184,7 +5191,8 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	hns3_stop_rxtx_datapath(dev);
 
 	rte_spinlock_lock(&hw->lock);
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		hns3_tm_dev_stop_proc(hw);
 		hns3_config_mac_tnl_int(hw, false);
 		hns3_stop_tqps(hw);
@@ -5577,7 +5585,8 @@ hns3_is_reset_pending(struct hns3_adapter *hns)
 
 	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					  rte_memory_order_relaxed);
 		hns3_schedule_delayed_reset(hns);
 		hns3_warn(hw, "High level reset detected, delay do reset");
 		return true;
@@ -5677,7 +5686,7 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level)
 }
 
 static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
 	struct hns3_hw *hw = &hns->hw;
 	enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5746,8 @@ hns3_prepare_reset(struct hns3_adapter *hns)
 		 * any mailbox handling or command to firmware is only valid
 		 * after hns3_cmd_init is called.
 		 */
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					  rte_memory_order_relaxed);
 		hw->reset.stats.request_cnt++;
 		break;
 	case HNS3_IMP_RESET:
@@ -5792,7 +5802,8 @@ hns3_stop_service(struct hns3_adapter *hns)
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed) == 0)
 		hns3_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);
 
@@ -5913,10 +5924,10 @@ hns3_reset_service(void *param)
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			    SCHEDULE_DEFERRED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				  __ATOMIC_RELAXED);
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) == SCHEDULE_DEFERRED) {
+		rte_atomic_store_explicit(&hw->reset.schedule,
+				SCHEDULE_REQUESTED, rte_memory_order_relaxed);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5936,8 @@ hns3_reset_service(void *param)
 			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 		}
 	}
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE,
+				  rte_memory_order_relaxed);
 
 	/*
 	 * Check if there is any ongoing reset in the hardware. This status can
@@ -6575,8 +6587,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			    SCHEDULE_PENDING) {
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) == SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
 	} else {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 12d8299def39..209d316479a5 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -399,17 +399,17 @@ enum hns3_schedule {
 
 struct hns3_reset_data {
 	enum hns3_reset_stage stage;
-	uint16_t schedule;
+	RTE_ATOMIC(uint16_t) schedule;
 	/* Reset flag, covering the entire reset process */
-	uint16_t resetting;
+	RTE_ATOMIC(uint16_t) resetting;
 	/* Used to disable sending cmds during reset */
-	uint16_t disable_cmd;
+	RTE_ATOMIC(uint16_t) disable_cmd;
 	/* The reset level being processed */
 	enum hns3_reset_level level;
 	/* Reset level set, each bit represents a reset level */
-	uint64_t pending;
+	RTE_ATOMIC(uint64_t) pending;
 	/* Request reset level set, from interrupt or mailbox */
-	uint64_t request;
+	RTE_ATOMIC(uint64_t) request;
 	int attempts; /* Reset failure retry */
 	int retries;  /* Timeout failure retry in reset_post */
 	/*
@@ -497,7 +497,7 @@ struct hns3_hw {
 	 * by dev_set_link_up() or dev_start().
 	 */
 	bool set_link_down;
-	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+	RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
 	struct hns3_tqp_stats tqp_stats;
 	/* Include Mac stats | Rx stats | Tx stats */
 	struct hns3_mac_stats mac_stats;
@@ -842,7 +842,7 @@ struct hns3_vf {
 	struct hns3_adapter *adapter;
 
 	/* Whether PF support push link status change to VF */
-	uint16_t pf_push_lsc_cap;
+	RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
 
 	/*
 	 * If PF support push link status change, VF still need send request to
@@ -851,7 +851,7 @@ struct hns3_vf {
 	 */
 	uint16_t req_link_info_cnt;
 
-	uint16_t poll_job_started; /* whether poll job is started */
+	RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
 };
 
 struct hns3_adapter {
@@ -995,32 +995,36 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
 	hns3_read_reg((a)->io_base, (reg))
 
 static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, RTE_ATOMIC(uint64_t) *addr)
 {
 	uint64_t res;
 
-	res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+	res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) &
+	       (1UL << nr)) != 0;
 	return res;
 }
 
 static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, RTE_ATOMIC(uint64_t) *addr)
 {
-	__atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+	rte_atomic_fetch_or_explicit(addr, (1UL << nr),
+				     rte_memory_order_relaxed);
 }
 
 static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
-	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+	rte_atomic_fetch_and_explicit(addr, ~(1UL << nr),
+				      rte_memory_order_relaxed);
 }
 
 static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
 	uint64_t mask = (1UL << nr);
 
-	return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+	return rte_atomic_fetch_and_explicit(addr,
+			~mask, rte_memory_order_relaxed) & mask;
 }
 
 int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 83d3d660056d..5b74efd9db1a 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
 };
 
 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
-						    uint64_t *levels);
+						    RTE_ATOMIC(uint64_t) *levels);
 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
 
@@ -484,7 +484,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	 * MTU value issued by hns3 VF PMD must be less than or equal to
 	 * PF's MTU.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "Failed to set mtu during resetting");
 		return -EIO;
 	}
@@ -565,7 +565,7 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
 
 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
-		__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-					  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+		rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap,
+			&exp, val, rte_memory_order_acquire, rte_memory_order_acquire);
 }
 
 static void
@@ -650,8 +650,8 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
 	struct hns3_vf_to_pf_msg req;
 
-	__atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
-			 __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&vf->pf_push_lsc_cap,
+			HNS3_PF_PUSH_LSC_CAP_UNKNOWN, rte_memory_order_release);
 
 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
 	(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,8 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 		 * mailbox from PF driver to get this capability.
 		 */
 		hns3vf_handle_mbx_msg(hw);
-		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+		if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap,
+					     rte_memory_order_acquire) !=
 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
 			break;
 		remain_ms--;
@@ -677,10 +678,10 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 	 * state: unknown (means pf not ack), not_supported, supported.
 	 * Here config it as 'not_supported' when it's 'unknown' state.
 	 */
-	__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-				  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+	rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp,
+		val, rte_memory_order_acquire, rte_memory_order_acquire);
 
-	if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+	if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
 		HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
 		hns3_info(hw, "detect PF support push link status change!");
 	} else {
@@ -920,7 +921,7 @@ hns3vf_request_link_info(struct hns3_hw *hw)
 	bool send_req;
 	int ret;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return;
 
 	send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +957,7 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
 	 * sending request to PF kernel driver, then could update link status by
 	 * process PF kernel driver's link status mailbox message.
 	 */
-	if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+	if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
 		return;
 
 	if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +995,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw,
 			 "vf set vlan id failed during resetting, vlan_id =%u",
 			 vlan_id);
@@ -1059,7 +1060,8 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	unsigned int tmp_mask;
 	int ret = 0;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
 			 mask);
 		return -EIO;
@@ -1252,7 +1254,7 @@ hns3vf_start_poll_job(struct rte_eth_dev *dev)
 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
 		vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
 
-	__atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
 
 	hns3vf_service_handler(dev);
 }
@@ -1264,7 +1266,7 @@ hns3vf_stop_poll_job(struct rte_eth_dev *dev)
 
 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
 
-	__atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
 }
 
 static int
@@ -1500,10 +1502,10 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	 * during reset and is required to be released after the reset is
 	 * completed.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.resetting,  rte_memory_order_relaxed) == 0)
 		hns3_dev_release_mbufs(hns);
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -1528,7 +1530,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
 	hns3_stop_rxtx_datapath(dev);
 
 	rte_spinlock_lock(&hw->lock);
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
 		hns3_stop_tqps(hw);
 		hns3vf_do_stop(hns);
 		hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1645,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1775,8 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns)
 
 	last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					 rte_memory_order_relaxed);
 		hns3_schedule_delayed_reset(hns);
 		hns3_warn(hw, "High level reset detected, delay do reset");
 		return true;
@@ -1847,7 +1850,8 @@ hns3vf_prepare_reset(struct hns3_adapter *hns)
 		if (ret)
 			return ret;
 	}
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 
 	return 0;
 }
@@ -1888,7 +1892,8 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries.
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed) == 0)
 		hns3_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);
 
@@ -2030,7 +2035,7 @@ hns3vf_restore_conf(struct hns3_adapter *hns)
 }
 
 static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
 {
 	enum hns3_reset_level reset_level;
 
@@ -2070,10 +2075,10 @@ hns3vf_reset_service(void *param)
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 			    SCHEDULE_DEFERRED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				 rte_memory_order_relaxed);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2087,7 @@ hns3vf_reset_service(void *param)
 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
 		}
 	}
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
 
 	/*
 	 * Hardware reset has been notified, we now have to poll & check if
@@ -2277,8 +2282,9 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			    SCHEDULE_PENDING) {
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+				     rte_memory_order_relaxed) ==
+				     SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
 	} else {
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30dcb56..980d8b6fac54 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ hns3_get_hw_error_status(struct hns3_cmd_desc *desc, uint8_t desc_offset,
 
 static int
 hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
-		     int num, uint64_t *levels,
+		     int num, RTE_ATOMIC(uint64_t) *levels,
 		     enum hns3_hw_err_report_type err_type)
 {
 	const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
 }
 
 void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint32_t mpf_bd_num, pf_bd_num, bd_num;
 	struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
 }
 
 void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint32_t mpf_bd_num, pf_bd_num, bd_num;
 	struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,8 @@ hns3_reset_init(struct hns3_hw *hw)
 	hw->reset.request = 0;
 	hw->reset.pending = 0;
 	hw->reset.resetting = 0;
-	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0,
+				  rte_memory_order_relaxed);
 	hw->reset.wait_data = rte_zmalloc("wait_data",
 					  sizeof(struct hns3_wait_data), 0);
 	if (!hw->reset.wait_data) {
@@ -2419,8 +2420,8 @@ hns3_schedule_reset(struct hns3_adapter *hns)
 
 	/* Reschedule the reset process after successful initialization */
 	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+					  rte_memory_order_relaxed);
 		return;
 	}
 
@@ -2428,15 +2429,15 @@ hns3_schedule_reset(struct hns3_adapter *hns)
 		return;
 
 	/* Schedule restart alarm if it is not scheduled yet */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			SCHEDULE_REQUESTED)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) == SCHEDULE_REQUESTED)
 		return;
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			    SCHEDULE_DEFERRED)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) == SCHEDULE_DEFERRED)
 		rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
 
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				  rte_memory_order_relaxed);
 
 	rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
 }
@@ -2453,11 +2454,11 @@ hns3_schedule_delayed_reset(struct hns3_adapter *hns)
 		return;
 	}
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
-			    SCHEDULE_NONE)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) != SCHEDULE_NONE)
 		return;
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
-			 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+				  rte_memory_order_relaxed);
 	rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
 }
 
@@ -2537,7 +2538,7 @@ hns3_reset_req_hw_reset(struct hns3_adapter *hns)
 }
 
 static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint64_t merge_cnt = hw->reset.stats.merge_cnt;
 	uint64_t tmp;
@@ -2633,7 +2634,8 @@ hns3_reset_err_handle(struct hns3_adapter *hns)
 	 * Regardless of whether the execution is successful or not, the
 	 * flow after execution must be continued.
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed))
 		(void)hns3_cmd_init(hw);
 reset_fail:
 	hw->reset.attempts = 0;
@@ -2661,7 +2663,8 @@ hns3_reset_pre(struct hns3_adapter *hns)
 	int ret;
 
 	if (hw->reset.stage == RESET_STAGE_NONE) {
-		__atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hns->hw.reset.resetting, 1,
+					  rte_memory_order_relaxed);
 		hw->reset.stage = RESET_STAGE_DOWN;
 		hns3_report_reset_begin(hw);
 		ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2753,8 @@ hns3_reset_post(struct hns3_adapter *hns)
 		hns3_notify_reset_ready(hw, false);
 		hns3_clear_reset_level(hw, &hw->reset.pending);
 		hns3_clear_reset_status(hw);
-		__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hns->hw.reset.resetting, 0,
+					  rte_memory_order_relaxed);
 		hw->reset.attempts = 0;
 		hw->reset.stats.success_cnt++;
 		hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2816,8 @@ hns3_reset_fail_handle(struct hns3_adapter *hns)
 		hw->reset.mbuf_deferred_free = false;
 	}
 	rte_spinlock_unlock(&hw->lock);
-	__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hns->hw.reset.resetting, 0,
+				  rte_memory_order_relaxed);
 	hw->reset.stage = RESET_STAGE_NONE;
 	hns3_clock_gettime(&tv);
 	timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c0722c67..1edb07de361b 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
 };
 
 int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
 void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
 void hns3_handle_error(struct hns3_adapter *hns);
 
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc1668a17..c897bd39bed5 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,8 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 
 	mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
 	while (wait_time < mbx_time_limit) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			hns3_err(hw, "Don't wait for mbx response because of "
 				 "disable_cmd");
 			return -EBUSY;
@@ -382,7 +383,8 @@ hns3pf_handle_mbx_msg(struct hns3_hw *hw)
 	rte_spinlock_lock(&hw->cmq.crq.lock);
 
 	while (!hns3_cmd_crq_empty(hw)) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			rte_spinlock_unlock(&hw->cmq.crq.lock);
 			return;
 		}
@@ -457,7 +459,8 @@ hns3vf_handle_mbx_msg(struct hns3_hw *hw)
 	}
 
 	while (!hns3_cmd_crq_empty(hw)) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			rte_spinlock_unlock(&hw->cmq.crq.lock);
 			return;
 		}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f1941c6b2..8ee97a7c598a 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,8 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum hns3_mp_req_type type)
 	int i;
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
-		__atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+		rte_atomic_load_explicit(&hw->secondary_cnt,
+				rte_memory_order_relaxed) == 0)
 		return;
 
 	if (!mp_req_type_is_valid(type)) {
@@ -277,7 +278,8 @@ hns3_mp_init(struct rte_eth_dev *dev)
 				     ret);
 			return ret;
 		}
-		__atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1,
+					      rte_memory_order_relaxed);
 	} else {
 		ret = hns3_mp_init_primary();
 		if (ret) {
@@ -297,7 +299,8 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		__atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1,
+					      rte_memory_order_relaxed);
 
 	process_data.eth_dev_cnt--;
 	if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 9087bcffed9b..32cb314cd9e5 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4463,7 +4463,8 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
-	    __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+	    rte_atomic_load_explicit(&hns->hw.reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
 		eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
 		eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4529,7 +4530,8 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to start Rx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4585,7 +4587,8 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to stop Rx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4614,7 +4617,8 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to start Tx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4647,7 +4651,8 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to stop Tx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d9691640140b..656db9b170b2 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,8 @@ hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
 	if (error == NULL)
 		return -EINVAL;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 		error->message = "device is resetting";
 		/* don't goto fail_clear, user may try later */
@@ -1141,7 +1142,8 @@ hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
 	if (error == NULL)
 		return -EINVAL;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 		error->message = "device is resetting";
 		return -EBUSY;
-- 
2.30.0



* Re: [PATCH 1/2] eal: fix constraints on stdatomic API
  2023-12-11  7:39 ` [PATCH 1/2] eal: fix constraints on " Jie Hai
@ 2023-12-11 18:53   ` Tyler Retzlaff
  2023-12-15  2:47     ` Jie Hai
  0 siblings, 1 reply; 8+ messages in thread
From: Tyler Retzlaff @ 2023-12-11 18:53 UTC
  To: Jie Hai
  Cc: dev, Morten Brørup, Konstantin Ananyev, lihuisong,
	fengchengwen, liudongdong3

On Mon, Dec 11, 2023 at 03:39:03PM +0800, Jie Hai wrote:
> The first parameter of rte_atomic_exchange_explicit() must be a
> pointer to an _Atomic type. Running "meson setup --werror
> -Denable_stdatomic=true build && ninja -C build" therefore fails
> with a compile error. This patch fixes it.
> 
> Fixes: 1ec6a845b5cb ("eal: use stdatomic API in public headers")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Jie Hai <haijie1@huawei.com>
> ---
>  app/test/test_atomic.c               |  6 +++---
>  lib/eal/include/generic/rte_atomic.h | 12 ++++++------
>  2 files changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
> index db07159e81ab..c3cb3ae0ea57 100644
> --- a/app/test/test_atomic.c
> +++ b/app/test/test_atomic.c
> @@ -347,9 +347,9 @@ typedef union {
>  const uint8_t CRC8_POLY = 0x91;
>  uint8_t crc8_table[256];
>  
> -volatile uint16_t token16;
> -volatile uint32_t token32;
> -volatile uint64_t token64;
> +volatile RTE_ATOMIC(uint16_t) token16;
> +volatile RTE_ATOMIC(uint32_t) token32;
> +volatile RTE_ATOMIC(uint64_t) token64;

subject to my comment below, volatile qualification can be removed.
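e.g. a sketch of the suggested declarations:

	RTE_ATOMIC(uint16_t) token16;
	RTE_ATOMIC(uint32_t) token32;
	RTE_ATOMIC(uint64_t) token64;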

>  
>  static void
>  build_crc8_table(void)
> diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
> index 0e639dad76a4..38c3b41f9c68 100644
> --- a/lib/eal/include/generic/rte_atomic.h
> +++ b/lib/eal/include/generic/rte_atomic.h
> @@ -207,11 +207,11 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
>   *   The original value at that location
>   */
>  static inline uint16_t
> -rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
> +rte_atomic16_exchange(volatile RTE_ATOMIC(uint16_t) *dst, uint16_t val);

the existing rte_atomicNN (the old non-standard ones) are deprecated and will
be eventually removed so there isn't a lot of value in churning their
signatures to wrap the rte_stdatomic macros.

the right thing to do here is to just change the calling code to use the
generic rte_stdatomic macros directly so we can eventually remove
rte_atomicNN_xxx.
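for illustration, a sketch of that direct usage (untested, with a
hypothetical caller):

	static RTE_ATOMIC(uint16_t) token16;

	static uint16_t
	swap_token16(uint16_t val)
	{
		/* instead of the deprecated rte_atomic16_exchange() */
		return rte_atomic_exchange_explicit(&token16, val,
						    rte_memory_order_seq_cst);
	}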

ty



* Re: [PATCH 1/2] eal: fix constraints on stdatomic API
  2023-12-11 18:53   ` Tyler Retzlaff
@ 2023-12-15  2:47     ` Jie Hai
  2023-12-15  7:17       ` Tyler Retzlaff
  0 siblings, 1 reply; 8+ messages in thread
From: Jie Hai @ 2023-12-15  2:47 UTC
  To: Tyler Retzlaff
  Cc: dev, Morten Brørup, Konstantin Ananyev, lihuisong,
	fengchengwen, liudongdong3

On 2023/12/12 2:53, Tyler Retzlaff wrote:
> On Mon, Dec 11, 2023 at 03:39:03PM +0800, Jie Hai wrote:
>> The first parameter of rte_atomic_exchange_explicit() must be a
>> pointer to an _Atomic type. Running "meson setup --werror
>> -Denable_stdatomic=true build && ninja -C build" therefore fails
>> with a compile error. This patch fixes it.
>>
>> Fixes: 1ec6a845b5cb ("eal: use stdatomic API in public headers")
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Jie Hai <haijie1@huawei.com>
>> ---
>>   app/test/test_atomic.c               |  6 +++---
>>   lib/eal/include/generic/rte_atomic.h | 12 ++++++------
>>   2 files changed, 9 insertions(+), 9 deletions(-)
>>
>> diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
>> index db07159e81ab..c3cb3ae0ea57 100644
>> --- a/app/test/test_atomic.c
>> +++ b/app/test/test_atomic.c
>> @@ -347,9 +347,9 @@ typedef union {
>>   const uint8_t CRC8_POLY = 0x91;
>>   uint8_t crc8_table[256];
>>   
>> -volatile uint16_t token16;
>> -volatile uint32_t token32;
>> -volatile uint64_t token64;
>> +volatile RTE_ATOMIC(uint16_t) token16;
>> +volatile RTE_ATOMIC(uint32_t) token32;
>> +volatile RTE_ATOMIC(uint64_t) token64;
> 
> subject to my comment below, volatile qualification can be removed.
> 
>>   
>>   static void
>>   build_crc8_table(void)
>> diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
>> index 0e639dad76a4..38c3b41f9c68 100644
>> --- a/lib/eal/include/generic/rte_atomic.h
>> +++ b/lib/eal/include/generic/rte_atomic.h
>> @@ -207,11 +207,11 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
>>    *   The original value at that location
>>    */
>>   static inline uint16_t
>> -rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
>> +rte_atomic16_exchange(volatile RTE_ATOMIC(uint16_t) *dst, uint16_t val);
> 
> the existing rte_atomicNN (the old non-standard ones) are deprecated and will
> be eventually removed so there isn't a lot of value in churning their
> signatures to wrap the rte_stdatomic macros.
> 
> the right thing to do here is to just change the calling code to use the
> generic rte_stdatomic macros directly so we can eventually remove
> rte_atomicNN_xxx.
> 
> ty
> 
Hi, Tyler Retzlaff,

Thank you for your review.

As I understand it, this code is used to test the
rte_atomicNN_exchange() API, so the calls here should not be modified.

Since the current problem breaks compilation, I think it can be fixed
first.

What do you think?

Thanks,
Jie Hai
> .


* Re: [PATCH 1/2] eal: fix constraints on stdatomic API
  2023-12-15  2:47     ` Jie Hai
@ 2023-12-15  7:17       ` Tyler Retzlaff
  0 siblings, 0 replies; 8+ messages in thread
From: Tyler Retzlaff @ 2023-12-15  7:17 UTC
  To: Jie Hai
  Cc: dev, Morten Brørup, Konstantin Ananyev, lihuisong,
	fengchengwen, liudongdong3

On Fri, Dec 15, 2023 at 10:47:36AM +0800, Jie Hai wrote:
> On 2023/12/12 2:53, Tyler Retzlaff wrote:
> >On Mon, Dec 11, 2023 at 03:39:03PM +0800, Jie Hai wrote:
> >>The first parameter of rte_atomic_exchange_explicit() must be a
> >>pointer to an _Atomic type. Running "meson setup --werror
> >>-Denable_stdatomic=true build && ninja -C build" therefore fails
> >>with a compile error. This patch fixes it.
> >>
> >>Fixes: 1ec6a845b5cb ("eal: use stdatomic API in public headers")
> >>Cc: stable@dpdk.org
> >>
> >>Signed-off-by: Jie Hai <haijie1@huawei.com>
> >>---
> >>  app/test/test_atomic.c               |  6 +++---
> >>  lib/eal/include/generic/rte_atomic.h | 12 ++++++------
> >>  2 files changed, 9 insertions(+), 9 deletions(-)
> >>
> >>diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
> >>index db07159e81ab..c3cb3ae0ea57 100644
> >>--- a/app/test/test_atomic.c
> >>+++ b/app/test/test_atomic.c
> >>@@ -347,9 +347,9 @@ typedef union {
> >>  const uint8_t CRC8_POLY = 0x91;
> >>  uint8_t crc8_table[256];
> >>-volatile uint16_t token16;
> >>-volatile uint32_t token32;
> >>-volatile uint64_t token64;
> >>+volatile RTE_ATOMIC(uint16_t) token16;
> >>+volatile RTE_ATOMIC(uint32_t) token32;
> >>+volatile RTE_ATOMIC(uint64_t) token64;
> >
> >subject to my comment below, volatile qualification can be removed.
> >
> >>  static void
> >>  build_crc8_table(void)
> >>diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
> >>index 0e639dad76a4..38c3b41f9c68 100644
> >>--- a/lib/eal/include/generic/rte_atomic.h
> >>+++ b/lib/eal/include/generic/rte_atomic.h
> >>@@ -207,11 +207,11 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
> >>   *   The original value at that location
> >>   */
> >>  static inline uint16_t
> >>-rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
> >>+rte_atomic16_exchange(volatile RTE_ATOMIC(uint16_t) *dst, uint16_t val);
> >
> >the existing rte_atomicNN (the old non-standard ones) are deprecated and will
> >be eventually removed so there isn't a lot of value in churning their
> >signatures to wrap the rte_stdatomic macros.
> >
> >the right thing to do here is to just change the calling code to use the
> >generic rte_stdatomic macros directly so we can eventually remove
> >rte_atomicNN_xxx.
> >
> >ty
> >
> Hi, Tyler Retzlaff,
> 
> Thank you for your review.
> 
> As I understand it, this code is used to test the
> rte_atomicNN_exchange() API, so the calls here should not be modified.
> 
> Since the current problem breaks compilation, I think it can be
> fixed first.

okay, i understand the motivation now and see what you mean.

first, sorry for the trouble; i did not expect anyone to start using this
option until i had completed the full conversion of the tree. drivers and
tests are still on my todo list.

for now would it be reasonable to just stop building this test when
enable_stdatomic=true? the api is still going to be tested by the ci
and by builds that do not enable the option.
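e.g. something along these lines in the test source (just a sketch,
keyed off the RTE_ENABLE_STDATOMIC define; doing it at the meson level
would work too):

	#ifdef RTE_ENABLE_STDATOMIC
	static int
	test_atomic(void)
	{
		printf("skipped: built with enable_stdatomic=true\n");
		return TEST_SKIPPED;
	}
	#else
	/* ... existing test body unchanged ... */
	#endif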

as for changing the signatures of the existing api, i don't strictly
object, since the RTE_ATOMIC() macro expands to nothing for non-stdatomic
builds and so isn't technically an api or abi change. but there may still
be some resistance to merging regardless.

wonder if anyone else has any suggestions here?

ty

> 
> What do you think?
> 
> Thanks,
> Jie Hai
> >.


* Re: [PATCH 2/2] net/hns3: use stdatomic API
  2023-11-11  1:59 ` [PATCH 2/2] net/hns3: use stdatomic API Jie Hai
@ 2023-11-11  3:20   ` lihuisong (C)
  0 siblings, 0 replies; 8+ messages in thread
From: lihuisong (C) @ 2023-11-11  3:20 UTC
  To: Jie Hai, dev, ferruh.yigit, Yisen Zhuang; +Cc: fengchengwen

Reviewed-by: Huisong Li <lihuisong@huawei.com>

On 2023/11/11 9:59, Jie Hai wrote:
> Replace the use of the gcc builtin __atomic_xxx intrinsics with the
> corresponding rte_atomic_xxx optional stdatomic API.
>
> Signed-off-by: Jie Hai <haijie1@huawei.com>
> ---
>   drivers/net/hns3/hns3_cmd.c       | 22 +++++++-----
>   drivers/net/hns3/hns3_dcb.c       |  3 +-
>   drivers/net/hns3/hns3_ethdev.c    | 51 ++++++++++++++++-----------
>   drivers/net/hns3/hns3_ethdev.h    | 12 ++++---
>   drivers/net/hns3/hns3_ethdev_vf.c | 57 ++++++++++++++++---------------
>   drivers/net/hns3/hns3_intr.c      | 39 ++++++++++++---------
>   drivers/net/hns3/hns3_mbx.c       |  6 ++--
>   drivers/net/hns3/hns3_mp.c        |  9 +++--
>   drivers/net/hns3/hns3_rxtx.c      | 15 +++++---
>   drivers/net/hns3/hns3_tm.c        |  6 ++--
>   10 files changed, 131 insertions(+), 89 deletions(-)
>
> diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
> index 2c1664485bef..4e1a02a75e0f 100644
> --- a/drivers/net/hns3/hns3_cmd.c
> +++ b/drivers/net/hns3/hns3_cmd.c
> @@ -49,7 +49,8 @@ hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
>   	char z_name[RTE_MEMZONE_NAMESIZE];
>   
>   	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
> -		__atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
> +		rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1,
> +					      rte_memory_order_relaxed));
>   	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
>   					 RTE_MEMZONE_IOVA_CONTIG, alignment,
>   					 RTE_PGSIZE_2M);
> @@ -198,8 +199,8 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
>   		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
>   [... remainder of the patch quoted in full without further
>   comments; the complete patch appears in the [PATCH 2/2] message
>   below ...]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 2/2] net/hns3: use stdatomic API
  2023-11-11  1:59 [PATCH 0/2] net/hns3: fix mailbox bug and replace __atomic_xxx API Jie Hai
@ 2023-11-11  1:59 ` Jie Hai
  2023-11-11  3:20   ` lihuisong (C)
  0 siblings, 1 reply; 8+ messages in thread
From: Jie Hai @ 2023-11-11  1:59 UTC (permalink / raw)
  To: dev, ferruh.yigit, Yisen Zhuang; +Cc: fengchengwen, lihuisong

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls of the optional stdatomic API.
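
As an illustration (a minimal sketch, not part of the patch, using a
hypothetical 'flag' variable), the load/store conversion pattern
applied throughout this series looks like:

	#include <stdint.h>
	#include <rte_stdatomic.h>

	/* variables accessed through the new API carry the
	 * RTE_ATOMIC() specifier
	 */
	static volatile RTE_ATOMIC(uint16_t) flag;

	static inline uint16_t
	conversion_sketch(void)
	{
		/* was: __atomic_store_n(&flag, 1, __ATOMIC_RELAXED); */
		rte_atomic_store_explicit(&flag, 1,
					  rte_memory_order_relaxed);

		/* was: return __atomic_load_n(&flag, __ATOMIC_RELAXED); */
		return rte_atomic_load_explicit(&flag,
						rte_memory_order_relaxed);
	}

Each call keeps the memory order its builtin used, so the ordering
semantics of the driver do not change.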

Signed-off-by: Jie Hai <haijie1@huawei.com>
---
 drivers/net/hns3/hns3_cmd.c       | 22 +++++++-----
 drivers/net/hns3/hns3_dcb.c       |  3 +-
 drivers/net/hns3/hns3_ethdev.c    | 51 ++++++++++++++++-----------
 drivers/net/hns3/hns3_ethdev.h    | 12 ++++---
 drivers/net/hns3/hns3_ethdev_vf.c | 57 ++++++++++++++++---------------
 drivers/net/hns3/hns3_intr.c      | 39 ++++++++++++---------
 drivers/net/hns3/hns3_mbx.c       |  6 ++--
 drivers/net/hns3/hns3_mp.c        |  9 +++--
 drivers/net/hns3/hns3_rxtx.c      | 15 +++++---
 drivers/net/hns3/hns3_tm.c        |  6 ++--
 10 files changed, 131 insertions(+), 89 deletions(-)
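
The one conversion that changes a call signature is the compare-and-swap
used for pf_push_lsc_cap in hns3_ethdev_vf.c: __atomic_compare_exchange()
takes the desired value by pointer plus a 'weak' flag, while the strong
stdatomic-style variant implies weak == false and, per the API
definition, takes the desired value directly. A minimal sketch (not part
of the diff; 'cap', 'exp' and 'val' are hypothetical stand-ins):

	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_stdatomic.h>

	static RTE_ATOMIC(uint16_t) cap;

	static inline bool
	cas_sketch(uint16_t exp, uint16_t val)
	{
		/* was: __atomic_compare_exchange(&cap, &exp, &val, 0,
		 *	__ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
		 */
		return rte_atomic_compare_exchange_strong_explicit(&cap,
				&exp, val, rte_memory_order_acquire,
				rte_memory_order_acquire);
	}
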

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 2c1664485bef..4e1a02a75e0f 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -49,7 +49,8 @@ hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
 	char z_name[RTE_MEMZONE_NAMESIZE];
 
 	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
-		__atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+		rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1,
+					      rte_memory_order_relaxed));
 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
 					 RTE_MEMZONE_IOVA_CONTIG, alignment,
 					 RTE_PGSIZE_2M);
@@ -198,8 +199,8 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
 		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
 			 csq->next_to_use, csq->next_to_clean);
 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			__atomic_store_n(&hw->reset.disable_cmd, 1,
-					 __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+						  rte_memory_order_relaxed);
 			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
 		}
 
@@ -313,7 +314,8 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 		if (hns3_cmd_csq_done(hw))
 			return 0;
 
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			hns3_err(hw,
 				 "Don't wait for reply because of disable_cmd");
 			return -EBUSY;
@@ -360,7 +362,8 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
 	int retval;
 	uint32_t ntc;
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -745,7 +748,8 @@ hns3_cmd_init(struct hns3_hw *hw)
 		ret = -EBUSY;
 		goto err_cmd_init;
 	}
-	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0,
+				  rte_memory_order_relaxed);
 
 	ret = hns3_cmd_query_firmware_version_and_capability(hw);
 	if (ret) {
@@ -788,7 +792,8 @@ hns3_cmd_init(struct hns3_hw *hw)
 	return 0;
 
 err_cmd_init:
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	return ret;
 }
 
@@ -817,7 +822,8 @@ hns3_cmd_uninit(struct hns3_hw *hw)
 	if (!hns->is_vf)
 		(void)hns3_firmware_compat_config(hw, false);
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 
 	/*
 	 * A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 2831d3dc6205..08c77e04857d 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,8 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
 	 * and configured directly to the hardware in the RESET_STAGE_RESTORE
 	 * stage of the reset process.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		for (i = 0; i < hw->rss_ind_tbl_size; i++)
 			rss_cfg->rss_indirection_tbl[i] =
 							i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 941d047bf1bd..4b63308e8fdf 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -134,7 +134,8 @@ hns3_proc_imp_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
 {
 	struct hns3_hw *hw = &hns->hw;
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
 	hw->reset.stats.imp_cnt++;
@@ -148,7 +149,8 @@ hns3_proc_global_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
 {
 	struct hns3_hw *hw = &hns->hw;
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+				  rte_memory_order_relaxed);
 	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
 	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
 	hw->reset.stats.global_cnt++;
@@ -1151,7 +1153,8 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * ensure that the hardware configuration remains unchanged before and
 	 * after reset.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
 		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
 	}
@@ -1175,7 +1178,8 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
 	 * we will restore configurations to hardware in hns3_restore_vlan_table
 	 * and hns3_restore_vlan_conf later.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
 		if (ret) {
 			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5063,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5155,8 @@ hns3_do_stop(struct hns3_adapter *hns)
 	 * during reset and is required to be released after the reset is
 	 * completed.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0)
 		hns3_dev_release_mbufs(hns);
 
 	ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5164,8 @@ hns3_do_stop(struct hns3_adapter *hns)
 		return ret;
 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -5184,7 +5191,8 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	hns3_stop_rxtx_datapath(dev);
 
 	rte_spinlock_lock(&hw->lock);
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		hns3_tm_dev_stop_proc(hw);
 		hns3_config_mac_tnl_int(hw, false);
 		hns3_stop_tqps(hw);
@@ -5553,10 +5561,12 @@ hns3_detect_reset_event(struct hns3_hw *hw)
 	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
 	vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
 	if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					  rte_memory_order_relaxed);
 		new_req = HNS3_IMP_RESET;
 	} else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					  rte_memory_order_relaxed);
 		new_req = HNS3_GLOBAL_RESET;
 	}
 
@@ -5744,7 +5754,8 @@ hns3_prepare_reset(struct hns3_adapter *hns)
 		 * any mailbox handling or command to firmware is only valid
 		 * after hns3_cmd_init is called.
 		 */
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					  rte_memory_order_relaxed);
 		hw->reset.stats.request_cnt++;
 		break;
 	case HNS3_IMP_RESET:
@@ -5799,7 +5810,8 @@ hns3_stop_service(struct hns3_adapter *hns)
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed) == 0)
 		hns3_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);
 
@@ -5920,10 +5932,10 @@ hns3_reset_service(void *param)
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			    SCHEDULE_DEFERRED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				  __ATOMIC_RELAXED);
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) == SCHEDULE_DEFERRED) {
+		rte_atomic_store_explicit(&hw->reset.schedule,
+				SCHEDULE_REQUESTED, rte_memory_order_relaxed);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5932,7 +5944,8 @@ hns3_reset_service(void *param)
 			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 		}
 	}
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE,
+				  rte_memory_order_relaxed);
 
 	/*
 	 * Check if there is any ongoing reset in the hardware. This status can
@@ -6582,8 +6595,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			    SCHEDULE_PENDING) {
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) == SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
 	} else {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 668f141e32ed..a0d62a5fd33f 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -999,20 +999,23 @@ hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
 {
 	uint64_t res;
 
-	res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+	res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) &
+	       (1UL << nr)) != 0;
 	return res;
 }
 
 static inline void
 hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
 {
-	__atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+	rte_atomic_fetch_or_explicit(addr, (1UL << nr),
+				     rte_memory_order_relaxed);
 }
 
 static inline void
 hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
 {
-	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+	rte_atomic_fetch_and_explicit(addr, ~(1UL << nr),
+				      rte_memory_order_relaxed);
 }
 
 static inline uint64_t
@@ -1020,7 +1023,8 @@ hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
 {
 	uint64_t mask = (1UL << nr);
 
-	return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+	return rte_atomic_fetch_and_explicit(addr,
+			~mask, rte_memory_order_relaxed) & mask;
 }
 
 int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 156fb905f990..51d17ee8a726 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -478,7 +478,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	 * MTU value issued by hns3 VF PMD must be less than or equal to
 	 * PF's MTU.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "Failed to set mtu during resetting");
 		return -EIO;
 	}
@@ -546,7 +546,7 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -618,8 +618,8 @@ hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
 
 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
-		__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-					  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+		rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap,
+			&exp, &val, rte_memory_order_acquire, rte_memory_order_acquire);
 }
 
 static void
@@ -633,8 +633,8 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
 
-	__atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
-			 __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+			 rte_memory_order_release);
 
 	(void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
 				NULL, 0);
@@ -649,7 +649,7 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 		 * mailbox from PF driver to get this capability.
 		 */
 		hns3_dev_handle_mbx_msg(hw);
-		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+		if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
 			break;
 		remain_ms--;
@@ -660,10 +660,10 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
 	 * state: unknown (means pf not ack), not_supported, supported.
 	 * Here config it as 'not_supported' when it's 'unknown' state.
 	 */
-	__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-				  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+	rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp,
+		&val, rte_memory_order_acquire, rte_memory_order_acquire);
 
-	if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+	if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
 		HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
 		hns3_info(hw, "detect PF support push link status change!");
 	} else {
@@ -897,7 +897,7 @@ hns3vf_request_link_info(struct hns3_hw *hw)
 	bool send_req;
 	int ret;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return;
 
 	send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -933,7 +933,7 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
 	 * sending request to PF kernel driver, then could update link status by
 	 * process PF kernel driver's link status mailbox message.
 	 */
-	if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+	if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
 		return;
 
 	if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -972,7 +972,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw,
 			 "vf set vlan id failed during resetting, vlan_id =%u",
 			 vlan_id);
@@ -1032,7 +1032,7 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	unsigned int tmp_mask;
 	int ret = 0;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
 			 mask);
 		return -EIO;
@@ -1222,7 +1222,7 @@ hns3vf_start_poll_job(struct rte_eth_dev *dev)
 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
 		vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
 
-	__atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
 
 	hns3vf_service_handler(dev);
 }
@@ -1234,7 +1234,7 @@ hns3vf_stop_poll_job(struct rte_eth_dev *dev)
 
 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
 
-	__atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
 }
 
 static int
@@ -1468,10 +1468,10 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 	 * during reset and is required to be released after the reset is
 	 * completed.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.resetting,  rte_memory_order_relaxed) == 0)
 		hns3_dev_release_mbufs(hns);
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -1496,7 +1496,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
 	hns3_stop_rxtx_datapath(dev);
 
 	rte_spinlock_lock(&hw->lock);
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
 		hns3_stop_tqps(hw);
 		hns3vf_do_stop(hns);
 		hns3_unmap_rx_interrupt(dev);
@@ -1611,7 +1611,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
@@ -1795,7 +1795,7 @@ hns3vf_prepare_reset(struct hns3_adapter *hns)
 		if (ret)
 			return ret;
 	}
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 
 	return 0;
 }
@@ -1836,7 +1836,7 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries.
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
 		hns3_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);
 
@@ -2018,10 +2018,10 @@ hns3vf_reset_service(void *param)
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 			    SCHEDULE_DEFERRED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				 rte_memory_order_relaxed);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2030,7 +2030,7 @@ hns3vf_reset_service(void *param)
 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
 		}
 	}
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
 
 	/*
 	 * Hardware reset has been notified, we now have to poll & check if
@@ -2225,8 +2225,9 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			    SCHEDULE_PENDING) {
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+				     rte_memory_order_relaxed) ==
+				     SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
 	} else {
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index c5a3e3797cbd..cb758cf3a9b7 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2402,7 +2402,8 @@ hns3_reset_init(struct hns3_hw *hw)
 	hw->reset.request = 0;
 	hw->reset.pending = 0;
 	hw->reset.resetting = 0;
-	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0,
+				  rte_memory_order_relaxed);
 	hw->reset.wait_data = rte_zmalloc("wait_data",
 					  sizeof(struct hns3_wait_data), 0);
 	if (!hw->reset.wait_data) {
@@ -2419,8 +2420,8 @@ hns3_schedule_reset(struct hns3_adapter *hns)
 
 	/* Reschedule the reset process after successful initialization */
 	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+					  rte_memory_order_relaxed);
 		return;
 	}
 
@@ -2428,15 +2429,15 @@ hns3_schedule_reset(struct hns3_adapter *hns)
 		return;
 
 	/* Schedule restart alarm if it is not scheduled yet */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			SCHEDULE_REQUESTED)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) == SCHEDULE_REQUESTED)
 		return;
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
-			    SCHEDULE_DEFERRED)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) == SCHEDULE_DEFERRED)
 		rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
 
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				  rte_memory_order_relaxed);
 
 	rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
 }
@@ -2453,11 +2454,11 @@ hns3_schedule_delayed_reset(struct hns3_adapter *hns)
 		return;
 	}
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
-			    SCHEDULE_NONE)
+	if (rte_atomic_load_explicit(&hw->reset.schedule,
+			rte_memory_order_relaxed) != SCHEDULE_NONE)
 		return;
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
-			 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+				  rte_memory_order_relaxed);
 	rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
 }
 
@@ -2633,7 +2634,8 @@ hns3_reset_err_handle(struct hns3_adapter *hns)
 	 * Regardless of whether the execution is successful or not, the
 	 * flow after execution must be continued.
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+				     rte_memory_order_relaxed))
 		(void)hns3_cmd_init(hw);
 reset_fail:
 	hw->reset.attempts = 0;
@@ -2661,7 +2663,8 @@ hns3_reset_pre(struct hns3_adapter *hns)
 	int ret;
 
 	if (hw->reset.stage == RESET_STAGE_NONE) {
-		__atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hns->hw.reset.resetting, 1,
+					  rte_memory_order_relaxed);
 		hw->reset.stage = RESET_STAGE_DOWN;
 		hns3_report_reset_begin(hw);
 		ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2753,8 @@ hns3_reset_post(struct hns3_adapter *hns)
 		hns3_notify_reset_ready(hw, false);
 		hns3_clear_reset_level(hw, &hw->reset.pending);
 		hns3_clear_reset_event(hw);
-		__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hns->hw.reset.resetting, 0,
+					  rte_memory_order_relaxed);
 		hw->reset.attempts = 0;
 		hw->reset.stats.success_cnt++;
 		hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2816,8 @@ hns3_reset_fail_handle(struct hns3_adapter *hns)
 		hw->reset.mbuf_deferred_free = false;
 	}
 	rte_spinlock_unlock(&hw->lock);
-	__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hns->hw.reset.resetting, 0,
+				  rte_memory_order_relaxed);
 	hw->reset.stage = RESET_STAGE_NONE;
 	hns3_clock_gettime(&tv);
 	timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index f1743c195efa..7af56ff23deb 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -59,7 +59,8 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
 
 	mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
 	while (wait_time < mbx_time_limit) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			hns3_err(hw, "Don't wait for mbx response because of "
 				 "disable_cmd");
 			return -EBUSY;
@@ -425,7 +426,8 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
 	}
 
 	while (!hns3_cmd_crq_empty(hw)) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
+					     rte_memory_order_relaxed)) {
 			rte_spinlock_unlock(&hw->cmq.crq.lock);
 			return;
 		}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f1941c6b2..8ee97a7c598a 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,8 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum hns3_mp_req_type type)
 	int i;
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
-		__atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+		rte_atomic_load_explicit(&hw->secondary_cnt,
+				rte_memory_order_relaxed) == 0)
 		return;
 
 	if (!mp_req_type_is_valid(type)) {
@@ -277,7 +278,8 @@ hns3_mp_init(struct rte_eth_dev *dev)
 				     ret);
 			return ret;
 		}
-		__atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1,
+					      rte_memory_order_relaxed);
 	} else {
 		ret = hns3_mp_init_primary();
 		if (ret) {
@@ -297,7 +299,8 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		__atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1,
+					      rte_memory_order_relaxed);
 
 	process_data.eth_dev_cnt--;
 	if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 09b7e90c7000..bb600475e91e 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4465,7 +4465,8 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
-	    __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+	    rte_atomic_load_explicit(&hns->hw.reset.resetting,
+				     rte_memory_order_relaxed) == 0) {
 		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
 		eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
 		eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4531,7 +4532,8 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to start Rx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4587,7 +4589,8 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to stop Rx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4616,7 +4619,8 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to start Tx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4649,7 +4653,8 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to stop Tx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d9691640140b..656db9b170b2 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,8 @@ hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
 	if (error == NULL)
 		return -EINVAL;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 		error->message = "device is resetting";
 		/* don't goto fail_clear, user may try later */
@@ -1141,7 +1142,8 @@ hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
 	if (error == NULL)
 		return -EINVAL;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting,
+				     rte_memory_order_relaxed)) {
 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 		error->message = "device is resetting";
 		return -EBUSY;
-- 
2.30.0


^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2023-12-15  7:17 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-12-11  7:39 [PATCH 0/2] bugfix and replace on use of stdatomic API Jie Hai
2023-12-11  7:39 ` [PATCH 1/2] eal: fix constraints on " Jie Hai
2023-12-11 18:53   ` Tyler Retzlaff
2023-12-15  2:47     ` Jie Hai
2023-12-15  7:17       ` Tyler Retzlaff
2023-12-11  7:39 ` [PATCH 2/2] net/hns3: use " Jie Hai
  -- strict thread matches above, loose matches on Subject: below --
2023-11-11  1:59 [PATCH 0/2] net/hns3: fix mailbox bug and replace __atomic_xxx API Jie Hai
2023-11-11  1:59 ` [PATCH 2/2] net/hns3: use stdatomic API Jie Hai
2023-11-11  3:20   ` lihuisong (C)
