DPDK patches and discussions
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: "Mattias Rönnblom" <mattias.ronnblom@ericsson.com>,
	"Morten Brørup" <mb@smartsharesystems.com>,
	"Abdullah Sevincer" <abdullah.sevincer@intel.com>,
	"Ajit Khaparde" <ajit.khaparde@broadcom.com>,
	"Alok Prasad" <palok@marvell.com>,
	"Anatoly Burakov" <anatoly.burakov@intel.com>,
	"Andrew Rybchenko" <andrew.rybchenko@oktetlabs.ru>,
	"Anoob Joseph" <anoobj@marvell.com>,
	"Bruce Richardson" <bruce.richardson@intel.com>,
	"Byron Marohn" <byron.marohn@intel.com>,
	"Chenbo Xia" <chenbox@nvidia.com>,
	"Chengwen Feng" <fengchengwen@huawei.com>,
	"Ciara Loftus" <ciara.loftus@intel.com>,
	"Ciara Power" <ciara.power@intel.com>,
	"Dariusz Sosnowski" <dsosnowski@nvidia.com>,
	"David Hunt" <david.hunt@intel.com>,
	"Devendra Singh Rawat" <dsinghrawat@marvell.com>,
	"Erik Gabriel Carrillo" <erik.g.carrillo@intel.com>,
	"Guoyang Zhou" <zhouguoyang@huawei.com>,
	"Harman Kalra" <hkalra@marvell.com>,
	"Harry van Haaren" <harry.van.haaren@intel.com>,
	"Honnappa Nagarahalli" <honnappa.nagarahalli@arm.com>,
	"Jakub Grajciar" <jgrajcia@cisco.com>,
	"Jerin Jacob" <jerinj@marvell.com>,
	"Jeroen de Borst" <jeroendb@google.com>,
	"Jian Wang" <jianwang@trustnetic.com>,
	"Jiawen Wu" <jiawenwu@trustnetic.com>,
	"Jie Hai" <haijie1@huawei.com>,
	"Jingjing Wu" <jingjing.wu@intel.com>,
	"Joshua Washington" <joshwash@google.com>,
	"Joyce Kong" <joyce.kong@arm.com>,
	"Junfeng Guo" <junfeng.guo@intel.com>,
	"Kevin Laatz" <kevin.laatz@intel.com>,
	"Konstantin Ananyev" <konstantin.v.ananyev@yandex.ru>,
	"Liang Ma" <liangma@liangbit.com>,
	"Long Li" <longli@microsoft.com>,
	"Maciej Czekaj" <mczekaj@marvell.com>,
	"Matan Azrad" <matan@nvidia.com>,
	"Maxime Coquelin" <maxime.coquelin@redhat.com>,
	"Nicolas Chautru" <nicolas.chautru@intel.com>,
	"Ori Kam" <orika@nvidia.com>,
	"Pavan Nikhilesh" <pbhagavatula@marvell.com>,
	"Peter Mccarthy" <peter.mccarthy@intel.com>,
	"Rahul Lakkireddy" <rahul.lakkireddy@chelsio.com>,
	"Reshma Pattan" <reshma.pattan@intel.com>,
	"Rosen Xu" <rosen.xu@intel.com>,
	"Ruifeng Wang" <ruifeng.wang@arm.com>,
	"Rushil Gupta" <rushilg@google.com>,
	"Sameh Gobriel" <sameh.gobriel@intel.com>,
	"Sivaprasad Tummala" <sivaprasad.tummala@amd.com>,
	"Somnath Kotur" <somnath.kotur@broadcom.com>,
	"Stephen Hemminger" <stephen@networkplumber.org>,
	"Suanming Mou" <suanmingm@nvidia.com>,
	"Sunil Kumar Kori" <skori@marvell.com>,
	"Sunil Uttarwar" <sunilprakashrao.uttarwar@amd.com>,
	"Tetsuya Mukawa" <mtetsuyah@gmail.com>,
	"Vamsi Attunuru" <vattunuru@marvell.com>,
	"Viacheslav Ovsiienko" <viacheslavo@nvidia.com>,
	"Vladimir Medvedkin" <vladimir.medvedkin@intel.com>,
	"Xiaoyun Wang" <cloud.wangxiaoyun@huawei.com>,
	"Yipeng Wang" <yipeng1.wang@intel.com>,
	"Yisen Zhuang" <yisen.zhuang@huawei.com>,
	"Yuying Zhang" <Yuying.Zhang@intel.com>,
	"Yuying Zhang" <yuying.zhang@intel.com>,
	"Ziyang Xuan" <xuanziyang2@huawei.com>,
	"Tyler Retzlaff" <roretzla@linux.microsoft.com>
Subject: [PATCH v3 06/45] net/hns3: use rte stdatomic API
Date: Wed, 27 Mar 2024 15:37:19 -0700
Message-ID: <1711579078-10624-7-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1711579078-10624-1-git-send-email-roretzla@linux.microsoft.com>

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic
API.
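
For reviewers, a minimal sketch of the conversion pattern applied
throughout this series (illustrative only, not part of the patch;
"counter" is a hypothetical variable, and the declarations come from
rte_stdatomic.h):

	#include <rte_stdatomic.h>

	/* before: plain C object manipulated with gcc builtins */
	static uint64_t counter;
	__atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);

	/* after: the object is qualified with RTE_ATOMIC() and accessed
	 * through the rte_atomic_*_explicit wrappers, which take an
	 * rte_memory_order_* argument
	 */
	static RTE_ATOMIC(uint64_t) counter;
	rte_atomic_fetch_add_explicit(&counter, 1, rte_memory_order_relaxed);

One call changes shape: rte_atomic_compare_exchange_strong_explicit()
takes the desired value by value rather than by pointer, which is why
the converted __atomic_compare_exchange() call sites below drop the
address-of on that argument.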

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/hns3/hns3_cmd.c       | 18 ++++++------
 drivers/net/hns3/hns3_dcb.c       |  2 +-
 drivers/net/hns3/hns3_ethdev.c    | 36 +++++++++++------------
 drivers/net/hns3/hns3_ethdev.h    | 32 ++++++++++-----------
 drivers/net/hns3/hns3_ethdev_vf.c | 60 +++++++++++++++++++--------------------
 drivers/net/hns3/hns3_intr.c      | 36 +++++++++++------------
 drivers/net/hns3/hns3_intr.h      |  4 +--
 drivers/net/hns3/hns3_mbx.c       |  6 ++--
 drivers/net/hns3/hns3_mp.c        |  6 ++--
 drivers/net/hns3/hns3_rxtx.c      | 10 +++----
 drivers/net/hns3/hns3_tm.c        |  4 +--
 11 files changed, 107 insertions(+), 107 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 001ff49..3c5fdbe 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,12 @@
 hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
 		      uint64_t size, uint32_t alignment)
 {
-	static uint64_t hns3_dma_memzone_id;
+	static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
 	const struct rte_memzone *mz = NULL;
 	char z_name[RTE_MEMZONE_NAMESIZE];
 
 	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
-		__atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+		rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
 					 RTE_MEMZONE_IOVA_CONTIG, alignment,
 					 RTE_PGSIZE_2M);
@@ -198,8 +198,8 @@
 		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
 			 csq->next_to_use, csq->next_to_clean);
 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			__atomic_store_n(&hw->reset.disable_cmd, 1,
-					 __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+					 rte_memory_order_relaxed);
 			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
 		}
 
@@ -313,7 +313,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 		if (hns3_cmd_csq_done(hw))
 			return 0;
 
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
 			hns3_err(hw,
 				 "Don't wait for reply because of disable_cmd");
 			return -EBUSY;
@@ -360,7 +360,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 	int retval;
 	uint32_t ntc;
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -747,7 +747,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 		ret = -EBUSY;
 		goto err_cmd_init;
 	}
-	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
 
 	ret = hns3_cmd_query_firmware_version_and_capability(hw);
 	if (ret) {
@@ -790,7 +790,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 	return 0;
 
 err_cmd_init:
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 	return ret;
 }
 
@@ -819,7 +819,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
 	if (!hns->is_vf)
 		(void)hns3_firmware_compat_config(hw, false);
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 
 	/*
 	 * A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 915e4eb..2f917fe 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,7 @@
 	 * and configured directly to the hardware in the RESET_STAGE_RESTORE
 	 * stage of the reset process.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
 		for (i = 0; i < hw->rss_ind_tbl_size; i++)
 			rss_cfg->rss_indirection_tbl[i] =
 							i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a..327f6fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ struct hns3_intr_state {
 };
 
 static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
-						 uint64_t *levels);
+						 RTE_ATOMIC(uint64_t) *levels);
 static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
 				    int on);
@@ -134,7 +134,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 {
 	struct hns3_hw *hw = &hns->hw;
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 	hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 	*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
 	hw->reset.stats.imp_cnt++;
@@ -148,7 +148,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 {
 	struct hns3_hw *hw = &hns->hw;
 
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 	hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
 	*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
 	hw->reset.stats.global_cnt++;
@@ -1151,7 +1151,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 	 * ensure that the hardware configuration remains unchanged before and
 	 * after reset.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
 		hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
 		hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
 	}
@@ -1175,7 +1175,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 	 * we will restore configurations to hardware in hns3_restore_vlan_table
 	 * and hns3_restore_vlan_conf later.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
 		ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
 		if (ret) {
 			hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5059,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5150,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 	 * during reset and is required to be released after the reset is
 	 * completed.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.resetting,  rte_memory_order_relaxed) == 0)
 		hns3_dev_release_mbufs(hns);
 
 	ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5158,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 		return ret;
 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -5184,7 +5184,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 	hns3_stop_rxtx_datapath(dev);
 
 	rte_spinlock_lock(&hw->lock);
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
 		hns3_tm_dev_stop_proc(hw);
 		hns3_config_mac_tnl_int(hw, false);
 		hns3_stop_tqps(hw);
@@ -5577,7 +5577,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 
 	last_req = hns3_get_reset_level(hns, &hw->reset.pending);
 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 		hns3_schedule_delayed_reset(hns);
 		hns3_warn(hw, "High level reset detected, delay do reset");
 		return true;
@@ -5677,7 +5677,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 }
 
 static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
 	struct hns3_hw *hw = &hns->hw;
 	enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5737,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 		 * any mailbox handling or command to firmware is only valid
 		 * after hns3_cmd_init is called.
 		 */
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 		hw->reset.stats.request_cnt++;
 		break;
 	case HNS3_IMP_RESET:
@@ -5792,7 +5792,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
 		hns3_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);
 
@@ -5913,10 +5913,10 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 			    SCHEDULE_DEFERRED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				  __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				  rte_memory_order_relaxed);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5925,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
 		}
 	}
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
 
 	/*
 	 * Check if there is any ongoing reset in the hardware. This status can
@@ -6576,7 +6576,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 			    SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e70c5ff..4c0f076 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -401,17 +401,17 @@ enum hns3_schedule {
 
 struct hns3_reset_data {
 	enum hns3_reset_stage stage;
-	uint16_t schedule;
+	RTE_ATOMIC(uint16_t) schedule;
 	/* Reset flag, covering the entire reset process */
-	uint16_t resetting;
+	RTE_ATOMIC(uint16_t) resetting;
 	/* Used to disable sending cmds during reset */
-	uint16_t disable_cmd;
+	RTE_ATOMIC(uint16_t) disable_cmd;
 	/* The reset level being processed */
 	enum hns3_reset_level level;
 	/* Reset level set, each bit represents a reset level */
-	uint64_t pending;
+	RTE_ATOMIC(uint64_t) pending;
 	/* Request reset level set, from interrupt or mailbox */
-	uint64_t request;
+	RTE_ATOMIC(uint64_t) request;
 	int attempts; /* Reset failure retry */
 	int retries;  /* Timeout failure retry in reset_post */
 	/*
@@ -499,7 +499,7 @@ struct hns3_hw {
 	 * by dev_set_link_up() or dev_start().
 	 */
 	bool set_link_down;
-	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+	RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
 	struct hns3_tqp_stats tqp_stats;
 	/* Include Mac stats | Rx stats | Tx stats */
 	struct hns3_mac_stats mac_stats;
@@ -844,7 +844,7 @@ struct hns3_vf {
 	struct hns3_adapter *adapter;
 
 	/* Whether PF support push link status change to VF */
-	uint16_t pf_push_lsc_cap;
+	RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
 
 	/*
 	 * If PF support push link status change, VF still need send request to
@@ -853,7 +853,7 @@ struct hns3_vf {
 	 */
 	uint16_t req_link_info_cnt;
 
-	uint16_t poll_job_started; /* whether poll job is started */
+	RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
 };
 
 struct hns3_adapter {
@@ -997,32 +997,32 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
 	hns3_read_reg((a)->io_base, (reg))
 
 static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
 	uint64_t res;
 
-	res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+	res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) & (1UL << nr)) != 0;
 	return res;
 }
 
 static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
-	__atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+	rte_atomic_fetch_or_explicit(addr, (1UL << nr), rte_memory_order_relaxed);
 }
 
 static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
-	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+	rte_atomic_fetch_and_explicit(addr, ~(1UL << nr), rte_memory_order_relaxed);
 }
 
 static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
 {
 	uint64_t mask = (1UL << nr);
 
-	return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+	return rte_atomic_fetch_and_explicit(addr, ~mask, rte_memory_order_relaxed) & mask;
 }
 
 int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a..b83d5b9 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
 };
 
 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
-						    uint64_t *levels);
+						    RTE_ATOMIC(uint64_t) *levels);
 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
 
@@ -484,7 +484,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	 * MTU value issued by hns3 VF PMD must be less than or equal to
 	 * PF's MTU.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "Failed to set mtu during resetting");
 		return -EIO;
 	}
@@ -565,7 +565,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
 
 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
-		__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-					  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+		rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+					  rte_memory_order_acquire, rte_memory_order_acquire);
 }
 
 static void
@@ -650,8 +650,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
 	struct hns3_vf_to_pf_msg req;
 
-	__atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
-			 __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+			 rte_memory_order_release);
 
 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
 	(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 		 * mailbox from PF driver to get this capability.
 		 */
 		hns3vf_handle_mbx_msg(hw);
-		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+		if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
 			break;
 		remain_ms--;
@@ -677,10 +677,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	 * state: unknown (means pf not ack), not_supported, supported.
 	 * Here config it as 'not_supported' when it's 'unknown' state.
 	 */
-	__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
-				  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+	rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+				  rte_memory_order_acquire, rte_memory_order_acquire);
 
-	if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+	if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
 		HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
 		hns3_info(hw, "detect PF support push link status change!");
 	} else {
@@ -920,7 +920,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	bool send_req;
 	int ret;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return;
 
 	send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +956,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	 * sending request to PF kernel driver, then could update link status by
 	 * process PF kernel driver's link status mailbox message.
 	 */
-	if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+	if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
 		return;
 
 	if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +994,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw,
 			 "vf set vlan id failed during resetting, vlan_id =%u",
 			 vlan_id);
@@ -1059,7 +1059,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	unsigned int tmp_mask;
 	int ret = 0;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
 			 mask);
 		return -EIO;
@@ -1252,7 +1252,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
 		vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
 
-	__atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
 
 	hns3vf_service_handler(dev);
 }
@@ -1264,7 +1264,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
 
-	__atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
 }
 
 static int
@@ -1500,10 +1500,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	 * during reset and is required to be released after the reset is
 	 * completed.
 	 */
-	if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.resetting,  rte_memory_order_relaxed) == 0)
 		hns3_dev_release_mbufs(hns);
 
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
 		hns3_configure_all_mac_addr(hns, true);
 		ret = hns3_reset_all_tqps(hns);
 		if (ret) {
@@ -1528,7 +1528,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	hns3_stop_rxtx_datapath(dev);
 
 	rte_spinlock_lock(&hw->lock);
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
 		hns3_stop_tqps(hw);
 		hns3vf_do_stop(hns);
 		hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1643,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1773,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
-		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 		hns3_schedule_delayed_reset(hns);
 		hns3_warn(hw, "High level reset detected, delay do reset");
 		return true;
@@ -1847,7 +1847,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 		if (ret)
 			return ret;
 	}
-	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
 
 	return 0;
 }
@@ -1888,7 +1888,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	 * from table space. Hence, for function reset software intervention is
 	 * required to delete the entries.
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
 		hns3_configure_all_mc_mac_addr(hns, true);
 	rte_spinlock_unlock(&hw->lock);
 
@@ -2030,7 +2030,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 }
 
 static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
 {
 	enum hns3_reset_level reset_level;
 
@@ -2070,10 +2070,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 	 * The interrupt may have been lost. It is necessary to handle
 	 * the interrupt to recover from the error.
 	 */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 			    SCHEDULE_DEFERRED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				 rte_memory_order_relaxed);
 		hns3_err(hw, "Handling interrupts in delayed tasks");
 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2082,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
 		}
 	}
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
 
 	/*
 	 * Hardware reset has been notified, we now have to poll & check if
@@ -2278,7 +2278,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 			    SCHEDULE_PENDING) {
 		hns3_err(hw, "Reschedule reset service after dev_init");
 		hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30..26fa2eb 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ enum hns3_hw_err_report_type {
 
 static int
 hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
-		     int num, uint64_t *levels,
+		     int num, RTE_ATOMIC(uint64_t) *levels,
 		     enum hns3_hw_err_report_type err_type)
 {
 	const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ enum hns3_hw_err_report_type {
 }
 
 void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint32_t mpf_bd_num, pf_bd_num, bd_num;
 	struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ enum hns3_hw_err_report_type {
 }
 
 void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint32_t mpf_bd_num, pf_bd_num, bd_num;
 	struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,7 @@ enum hns3_hw_err_report_type {
 	hw->reset.request = 0;
 	hw->reset.pending = 0;
 	hw->reset.resetting = 0;
-	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
 	hw->reset.wait_data = rte_zmalloc("wait_data",
 					  sizeof(struct hns3_wait_data), 0);
 	if (!hw->reset.wait_data) {
@@ -2419,8 +2419,8 @@ enum hns3_hw_err_report_type {
 
 	/* Reschedule the reset process after successful initialization */
 	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
-		__atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
-				 __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+				 rte_memory_order_relaxed);
 		return;
 	}
 
@@ -2428,15 +2428,15 @@ enum hns3_hw_err_report_type {
 		return;
 
 	/* Schedule restart alarm if it is not scheduled yet */
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 			SCHEDULE_REQUESTED)
 		return;
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
 			    SCHEDULE_DEFERRED)
 		rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
 
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
-				 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+				 rte_memory_order_relaxed);
 
 	rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
 }
@@ -2453,11 +2453,11 @@ enum hns3_hw_err_report_type {
 		return;
 	}
 
-	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
 			    SCHEDULE_NONE)
 		return;
-	__atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
-			 __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+			 rte_memory_order_relaxed);
 	rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
 }
 
@@ -2537,7 +2537,7 @@ enum hns3_hw_err_report_type {
 }
 
 static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
 {
 	uint64_t merge_cnt = hw->reset.stats.merge_cnt;
 	uint64_t tmp;
@@ -2633,7 +2633,7 @@ enum hns3_hw_err_report_type {
 	 * Regardless of whether the execution is successful or not, the
 	 * flow after execution must be continued.
 	 */
-	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
 		(void)hns3_cmd_init(hw);
 reset_fail:
 	hw->reset.attempts = 0;
@@ -2661,7 +2661,7 @@ enum hns3_hw_err_report_type {
 	int ret;
 
 	if (hw->reset.stage == RESET_STAGE_NONE) {
-		__atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
 		hw->reset.stage = RESET_STAGE_DOWN;
 		hns3_report_reset_begin(hw);
 		ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2750,7 @@ enum hns3_hw_err_report_type {
 		hns3_notify_reset_ready(hw, false);
 		hns3_clear_reset_level(hw, &hw->reset.pending);
 		hns3_clear_reset_status(hw);
-		__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+		rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
 		hw->reset.attempts = 0;
 		hw->reset.stats.success_cnt++;
 		hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2812,7 @@ enum hns3_hw_err_report_type {
 		hw->reset.mbuf_deferred_free = false;
 	}
 	rte_spinlock_unlock(&hw->lock);
-	__atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
 	hw->reset.stage = RESET_STAGE_NONE;
 	hns3_clock_gettime(&tv);
 	timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c07..1edb07d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
 };
 
 int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
 void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
 void hns3_handle_error(struct hns3_adapter *hns);
 
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc16..10c6e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,7 @@
 
 	mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
 	while (wait_time < mbx_time_limit) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
 			hns3_err(hw, "Don't wait for mbx response because of "
 				 "disable_cmd");
 			return -EBUSY;
@@ -382,7 +382,7 @@
 	rte_spinlock_lock(&hw->cmq.crq.lock);
 
 	while (!hns3_cmd_crq_empty(hw)) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
 			rte_spinlock_unlock(&hw->cmq.crq.lock);
 			return;
 		}
@@ -457,7 +457,7 @@
 	}
 
 	while (!hns3_cmd_crq_empty(hw)) {
-		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
 			rte_spinlock_unlock(&hw->cmq.crq.lock);
 			return;
 		}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f194..ba8f8ec 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,7 @@
 	int i;
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
-		__atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+		rte_atomic_load_explicit(&hw->secondary_cnt, rte_memory_order_relaxed) == 0)
 		return;
 
 	if (!mp_req_type_is_valid(type)) {
@@ -277,7 +277,7 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev)
 				     ret);
 			return ret;
 		}
-		__atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
 	} else {
 		ret = hns3_mp_init_primary();
 		if (ret) {
@@ -297,7 +297,7 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		__atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
 
 	process_data.eth_dev_cnt--;
 	if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 7e636a0..73a388b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4464,7 +4464,7 @@
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
-	    __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+	    rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
 		eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
 		eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
 		eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4530,7 +4530,7 @@
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to start Rx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4586,7 +4586,7 @@
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to stop Rx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4615,7 +4615,7 @@
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to start Tx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
@@ -4648,7 +4648,7 @@
 
 	rte_spinlock_lock(&hw->lock);
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		hns3_err(hw, "fail to stop Tx queue during resetting.");
 		rte_spinlock_unlock(&hw->lock);
 		return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d969164..92a6685 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,7 @@
 	if (error == NULL)
 		return -EINVAL;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 		error->message = "device is resetting";
 		/* don't goto fail_clear, user may try later */
@@ -1141,7 +1141,7 @@
 	if (error == NULL)
 		return -EINVAL;
 
-	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
 		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 		error->message = "device is resetting";
 		return -EBUSY;
-- 
1.8.3.1


Thread overview: 200+ messages
2024-03-20 20:50 [PATCH 00/46] use " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 01/46] net/mlx5: use rte " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 02/46] net/ixgbe: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 03/46] net/iavf: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 04/46] net/ice: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 05/46] net/i40e: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 06/46] net/hns3: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 07/46] net/bnxt: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 08/46] net/cpfl: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 09/46] net/af_xdp: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 10/46] net/octeon_ep: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 11/46] net/octeontx: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 12/46] net/cxgbe: " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 13/46] net/gve: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 14/46] net/memif: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 15/46] net/sfc: " Tyler Retzlaff
2024-03-21 18:11   ` Aaron Conole
2024-03-21 18:15     ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 16/46] net/thunderx: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 17/46] net/virtio: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 18/46] net/hinic: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 19/46] net/idpf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 20/46] net/qede: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 21/46] net/ring: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 22/46] vdpa/mlx5: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 23/46] raw/ifpga: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 24/46] event/opdl: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 25/46] event/octeontx: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 26/46] event/dsw: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 27/46] dma/skeleton: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 28/46] crypto/octeontx: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 29/46] common/mlx5: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 30/46] common/idpf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 31/46] common/iavf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 32/46] baseband/acc: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 33/46] net/txgbe: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 34/46] net/null: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 35/46] event/dlb2: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 36/46] dma/idxd: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 37/46] crypto/ccp: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 38/46] common/cpt: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 39/46] bus/vmbus: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 40/46] examples: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 41/46] app/dumpcap: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 42/46] app/test: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 43/46] app/test-eventdev: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 44/46] app/test-crypto-perf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 45/46] app/test-compress-perf: " Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 46/46] app/test-bbdev: " Tyler Retzlaff
2024-03-21 15:33 ` [PATCH 00/46] use " Stephen Hemminger
2024-03-21 16:22   ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 02/45] net/ixgbe: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 03/45] net/iavf: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 04/45] net/ice: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 05/45] net/i40e: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 06/45] net/hns3: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 07/45] net/bnxt: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 08/45] net/cpfl: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 09/45] net/af_xdp: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 10/45] net/octeon_ep: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 11/45] net/octeontx: " Tyler Retzlaff
2024-03-21 19:16   ` [PATCH v2 12/45] net/cxgbe: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 13/45] net/gve: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 14/45] net/memif: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 15/45] net/thunderx: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 16/45] net/virtio: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 17/45] net/hinic: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 18/45] net/idpf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 19/45] net/qede: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 20/45] net/ring: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 21/45] vdpa/mlx5: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 22/45] raw/ifpga: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 23/45] event/opdl: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 24/45] event/octeontx: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 25/45] event/dsw: " Tyler Retzlaff
2024-03-21 20:51     ` Mattias Rönnblom
2024-03-21 19:17   ` [PATCH v2 26/45] dma/skeleton: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 27/45] crypto/octeontx: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 28/45] common/mlx5: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 29/45] common/idpf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 30/45] common/iavf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 31/45] baseband/acc: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 32/45] net/txgbe: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 33/45] net/null: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 34/45] event/dlb2: " Tyler Retzlaff
2024-03-21 21:03     ` Mattias Rönnblom
2024-04-09 19:31       ` Sevincer, Abdullah
2024-03-21 19:17   ` [PATCH v2 35/45] dma/idxd: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 36/45] crypto/ccp: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 37/45] common/cpt: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 38/45] bus/vmbus: " Tyler Retzlaff
2024-03-21 21:12     ` Mattias Rönnblom
2024-03-21 21:34       ` Long Li
2024-03-22  7:04         ` Mattias Rönnblom
2024-03-22 19:32           ` Long Li
2024-03-22 19:34     ` Long Li
2024-03-25 16:41       ` Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 39/45] examples: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 40/45] app/dumpcap: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 41/45] app/test: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 42/45] app/test-eventdev: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 43/45] app/test-crypto-perf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 44/45] app/test-compress-perf: " Tyler Retzlaff
2024-03-21 19:17   ` [PATCH v2 45/45] app/test-bbdev: " Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 02/45] net/ixgbe: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 03/45] net/iavf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 04/45] net/ice: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 05/45] net/i40e: " Tyler Retzlaff
2024-03-27 22:37   ` Tyler Retzlaff [this message]
2024-03-27 22:37   ` [PATCH v3 07/45] net/bnxt: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 08/45] net/cpfl: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 09/45] net/af_xdp: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 10/45] net/octeon_ep: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 11/45] net/octeontx: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 12/45] net/cxgbe: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 13/45] net/gve: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 14/45] net/memif: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 15/45] net/thunderx: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 16/45] net/virtio: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 17/45] net/hinic: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 18/45] net/idpf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 19/45] net/qede: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 20/45] net/ring: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 21/45] vdpa/mlx5: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 22/45] raw/ifpga: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 23/45] event/opdl: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 24/45] event/octeontx: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 25/45] event/dsw: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 26/45] dma/skeleton: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 27/45] crypto/octeontx: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 28/45] common/mlx5: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 29/45] common/idpf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 30/45] common/iavf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 31/45] baseband/acc: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 32/45] net/txgbe: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 33/45] net/null: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 34/45] event/dlb2: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 35/45] dma/idxd: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 36/45] crypto/ccp: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 37/45] common/cpt: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 38/45] bus/vmbus: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 39/45] examples: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 40/45] app/dumpcap: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 41/45] app/test: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 42/45] app/test-eventdev: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 43/45] app/test-crypto-perf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 44/45] app/test-compress-perf: " Tyler Retzlaff
2024-03-27 22:37   ` [PATCH v3 45/45] app/test-bbdev: " Tyler Retzlaff
2024-03-29  2:07   ` [PATCH v3 00/45] use " Tyler Retzlaff
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
2024-04-19 23:05   ` [PATCH v4 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-04-20  8:03     ` Morten Brørup
2024-04-19 23:06   ` [PATCH v4 02/45] net/ixgbe: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 03/45] net/iavf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 04/45] net/ice: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 05/45] net/i40e: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 06/45] net/hns3: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 07/45] net/bnxt: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 08/45] net/cpfl: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 09/45] net/af_xdp: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 10/45] net/octeon_ep: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 11/45] net/octeontx: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 12/45] net/cxgbe: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 13/45] net/gve: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 14/45] net/memif: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 15/45] net/thunderx: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 16/45] net/virtio: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 17/45] net/hinic: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 18/45] net/idpf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 19/45] net/qede: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 20/45] net/ring: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 21/45] vdpa/mlx5: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 22/45] raw/ifpga: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 23/45] event/opdl: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 24/45] event/octeontx: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 25/45] event/dsw: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 26/45] dma/skeleton: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 27/45] crypto/octeontx: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 28/45] common/mlx5: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 29/45] common/idpf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 30/45] common/iavf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 31/45] baseband/acc: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 32/45] net/txgbe: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 33/45] net/null: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 34/45] event/dlb2: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 35/45] dma/idxd: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 36/45] crypto/ccp: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 37/45] common/cpt: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 38/45] bus/vmbus: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 39/45] examples: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 40/45] app/dumpcap: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 41/45] app/test: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 42/45] app/test-eventdev: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 43/45] app/test-crypto-perf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 44/45] app/test-compress-perf: " Tyler Retzlaff
2024-04-19 23:06   ` [PATCH v4 45/45] app/test-bbdev: " Tyler Retzlaff
