Message-ID: <0e8f68c9-8786-057a-c1db-11d256e13b5e@huawei.com>
Date: Sat, 11 Nov 2023 11:20:17 +0800
Subject: Re: [PATCH 2/2] net/hns3: use stdatomic API
From: "lihuisong (C)"
To: Jie Hai, Yisen Zhuang
References: <20231111015915.2776769-1-haijie1@huawei.com>
 <20231111015915.2776769-3-haijie1@huawei.com>
In-Reply-To: <20231111015915.2776769-3-haijie1@huawei.com>
List-Id: DPDK patches and discussions

Reviewed-by: Huisong Li

On 2023/11/11 9:59, Jie Hai wrote:
> Replace the use of gcc builtin __atomic_xxx intrinsics with the
> corresponding rte_atomic_xxx optional stdatomic API.
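The conversion is a one-to-one textual mapping: each __atomic_* builtin
becomes the matching rte_atomic_*_explicit wrapper, and each __ATOMIC_*
constant becomes the matching rte_memory_order_*. A minimal standalone
sketch of the pattern (toy code, not taken from the patch; the RTE_ATOMIC()
type qualifier follows the rte_stdatomic.h convention, and the uint16_t
type is an assumption here):

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* toy stand-in for a flag such as hw->reset.disable_cmd */
    static RTE_ATOMIC(uint16_t) disable_cmd;

    static int
    disable_and_check(void)
    {
            /* was: __atomic_store_n(&disable_cmd, 1, __ATOMIC_RELAXED); */
            rte_atomic_store_explicit(&disable_cmd, 1, rte_memory_order_relaxed);

            /* was: __atomic_load_n(&disable_cmd, __ATOMIC_RELAXED) */
            return rte_atomic_load_explicit(&disable_cmd, rte_memory_order_relaxed);
    }

When RTE_ENABLE_STDATOMIC is defined, the wrappers expand to the C11
atomic_*_explicit generic functions; otherwise they fall back to the same
gcc builtins, so the memory-ordering semantics should be unchanged either way.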
>
> Signed-off-by: Jie Hai
> ---
>  drivers/net/hns3/hns3_cmd.c       | 22 +++++++-----
>  drivers/net/hns3/hns3_dcb.c       |  3 +-
>  drivers/net/hns3/hns3_ethdev.c    | 51 ++++++++++++++++-----------
>  drivers/net/hns3/hns3_ethdev.h    | 12 ++++---
>  drivers/net/hns3/hns3_ethdev_vf.c | 57 ++++++++++++++++---------------
>  drivers/net/hns3/hns3_intr.c      | 39 ++++++++++++---------
>  drivers/net/hns3/hns3_mbx.c       |  6 ++--
>  drivers/net/hns3/hns3_mp.c        |  9 +++--
>  drivers/net/hns3/hns3_rxtx.c      | 15 +++++---
>  drivers/net/hns3/hns3_tm.c        |  6 ++--
>  10 files changed, 131 insertions(+), 89 deletions(-)
>
> diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
> index 2c1664485bef..4e1a02a75e0f 100644
> --- a/drivers/net/hns3/hns3_cmd.c
> +++ b/drivers/net/hns3/hns3_cmd.c
> @@ -49,7 +49,8 @@ hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
>          char z_name[RTE_MEMZONE_NAMESIZE];
>
>          snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
> -                __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
> +                rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1,
> +                                rte_memory_order_relaxed));
>          mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
>                                           RTE_MEMZONE_IOVA_CONTIG, alignment,
>                                           RTE_PGSIZE_2M);
> @@ -198,8 +199,8 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
>                  hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
>                           csq->next_to_use, csq->next_to_clean);
>                  if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> -                        __atomic_store_n(&hw->reset.disable_cmd, 1,
> -                                         __ATOMIC_RELAXED);
> +                        rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
> +                                         rte_memory_order_relaxed);
>                          hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
>                  }
>
> @@ -313,7 +314,8 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
>                  if (hns3_cmd_csq_done(hw))
>                          return 0;
>
> -                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
> +                if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
> +                    rte_memory_order_relaxed)) {
>                          hns3_err(hw,
>                                   "Don't wait for reply because of disable_cmd");
>                          return -EBUSY;
> @@ -360,7 +362,8 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
>          int retval;
>          uint32_t ntc;
>
> -        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
> +        if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
> +            rte_memory_order_relaxed))
>                  return -EBUSY;
>
>          rte_spinlock_lock(&hw->cmq.csq.lock);
> @@ -745,7 +748,8 @@ hns3_cmd_init(struct hns3_hw *hw)
>                  ret = -EBUSY;
>                  goto err_cmd_init;
>          }
> -        __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.disable_cmd, 0,
> +                        rte_memory_order_relaxed);
>
>          ret = hns3_cmd_query_firmware_version_and_capability(hw);
>          if (ret) {
> @@ -788,7 +792,8 @@ hns3_cmd_init(struct hns3_hw *hw)
>          return 0;
>
> err_cmd_init:
> -        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
> +                        rte_memory_order_relaxed);
>          return ret;
> }
>
> @@ -817,7 +822,8 @@ hns3_cmd_uninit(struct hns3_hw *hw)
>          if (!hns->is_vf)
>                  (void)hns3_firmware_compat_config(hw, false);
>
> -        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
> +                        rte_memory_order_relaxed);
>
>          /*
>           * A delay is added to ensure that the register cleanup operations
> diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
> index 2831d3dc6205..08c77e04857d 100644
> --- a/drivers/net/hns3/hns3_dcb.c
> +++ b/drivers/net/hns3/hns3_dcb.c
> @@ -648,7 +648,8 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q)
>           * and configured directly to the hardware in the RESET_STAGE_RESTORE
>           * stage of the reset process.
>           */
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed) == 0) {
>                  for (i = 0; i < hw->rss_ind_tbl_size; i++)
>                          rss_cfg->rss_indirection_tbl[i] =
>                                                  i % hw->alloc_rss_size;
> diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
> index 941d047bf1bd..4b63308e8fdf 100644
> --- a/drivers/net/hns3/hns3_ethdev.c
> +++ b/drivers/net/hns3/hns3_ethdev.c
> @@ -134,7 +134,8 @@ hns3_proc_imp_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
> {
>          struct hns3_hw *hw = &hns->hw;
>
> -        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
> +                        rte_memory_order_relaxed);
>          hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
>          *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
>          hw->reset.stats.imp_cnt++;
> @@ -148,7 +149,8 @@ hns3_proc_global_reset_event(struct hns3_adapter *hns, uint32_t *vec_val)
> {
>          struct hns3_hw *hw = &hns->hw;
>
> -        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
> +                        rte_memory_order_relaxed);
>          hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
>          *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
>          hw->reset.stats.global_cnt++;
> @@ -1151,7 +1153,8 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
>           * ensure that the hardware configuration remains unchanged before and
>           * after reset.
>           */
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed) == 0) {
>                  hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
>                  hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
>          }
> @@ -1175,7 +1178,8 @@ hns3_init_vlan_config(struct hns3_adapter *hns)
>           * we will restore configurations to hardware in hns3_restore_vlan_table
>           * and hns3_restore_vlan_conf later.
>           */
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed) == 0) {
>                  ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
>                  if (ret) {
>                          hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
> @@ -5059,7 +5063,8 @@ hns3_dev_start(struct rte_eth_dev *dev)
>          int ret;
>
>          PMD_INIT_FUNC_TRACE();
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed))
>                  return -EBUSY;
>
>          rte_spinlock_lock(&hw->lock);
> @@ -5150,7 +5155,8 @@ hns3_do_stop(struct hns3_adapter *hns)
>           * during reset and is required to be released after the reset is
>           * completed.
>           */
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed) == 0)
>                  hns3_dev_release_mbufs(hns);
>
>          ret = hns3_cfg_mac_mode(hw, false);
> @@ -5158,7 +5164,8 @@
>                  return ret;
>          hw->mac.link_status = RTE_ETH_LINK_DOWN;
>
> -        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
> +        if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
> +            rte_memory_order_relaxed) == 0) {
>                  hns3_configure_all_mac_addr(hns, true);
>                  ret = hns3_reset_all_tqps(hns);
>                  if (ret) {
> @@ -5184,7 +5191,8 @@ hns3_dev_stop(struct rte_eth_dev *dev)
>          hns3_stop_rxtx_datapath(dev);
>
>          rte_spinlock_lock(&hw->lock);
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed) == 0) {
>                  hns3_tm_dev_stop_proc(hw);
>                  hns3_config_mac_tnl_int(hw, false);
>                  hns3_stop_tqps(hw);
> @@ -5553,10 +5561,12 @@ hns3_detect_reset_event(struct hns3_hw *hw)
>          last_req = hns3_get_reset_level(hns, &hw->reset.pending);
>          vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
>          if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) {
> -                __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
> +                                rte_memory_order_relaxed);
>                  new_req = HNS3_IMP_RESET;
>          } else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) {
> -                __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
> +                                rte_memory_order_relaxed);
>                  new_req = HNS3_GLOBAL_RESET;
>          }
>
> @@ -5744,7 +5754,8 @@ hns3_prepare_reset(struct hns3_adapter *hns)
>                   * any mailbox handling or command to firmware is only valid
>                   * after hns3_cmd_init is called.
>                   */
> -                __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
> +                                rte_memory_order_relaxed);
>                  hw->reset.stats.request_cnt++;
>                  break;
>          case HNS3_IMP_RESET:
> @@ -5799,7 +5810,8 @@ hns3_stop_service(struct hns3_adapter *hns)
>           * from table space. Hence, for function reset software intervention is
>           * required to delete the entries
>           */
> -        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
> +        if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
> +            rte_memory_order_relaxed) == 0)
>                  hns3_configure_all_mc_mac_addr(hns, true);
>          rte_spinlock_unlock(&hw->lock);
>
> @@ -5920,10 +5932,10 @@ hns3_reset_service(void *param)
>           * The interrupt may have been lost. It is necessary to handle
>           * the interrupt to recover from the error.
>           */
> -        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
> -            SCHEDULE_DEFERRED) {
> -                __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
> -                                 __ATOMIC_RELAXED);
> +        if (rte_atomic_load_explicit(&hw->reset.schedule,
> +            rte_memory_order_relaxed) == SCHEDULE_DEFERRED) {
> +                rte_atomic_store_explicit(&hw->reset.schedule,
> +                        SCHEDULE_REQUESTED, rte_memory_order_relaxed);
>                  hns3_err(hw, "Handling interrupts in delayed tasks");
>                  hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
>                  reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
> @@ -5932,7 +5944,8 @@
>                          hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
>                  }
>          }
> -        __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE,
> +                        rte_memory_order_relaxed);
>
>          /*
>           * Check if there is any ongoing reset in the hardware. This status can
> @@ -6582,8 +6595,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
>
>          hw->adapter_state = HNS3_NIC_INITIALIZED;
>
> -        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
> -            SCHEDULE_PENDING) {
> +        if (rte_atomic_load_explicit(&hw->reset.schedule,
> +            rte_memory_order_relaxed) == SCHEDULE_PENDING) {
>                  hns3_err(hw, "Reschedule reset service after dev_init");
>                  hns3_schedule_reset(hns);
>          } else {
> diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
> index 668f141e32ed..a0d62a5fd33f 100644
> --- a/drivers/net/hns3/hns3_ethdev.h
> +++ b/drivers/net/hns3/hns3_ethdev.h
> @@ -999,20 +999,23 @@ hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
> {
>          uint64_t res;
>
> -        res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
> +        res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) &
> +               (1UL << nr)) != 0;
>          return res;
> }
>
> static inline void
> hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
> {
> -        __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
> +        rte_atomic_fetch_or_explicit(addr, (1UL << nr),
> +                        rte_memory_order_relaxed);
> }
>
> static inline void
> hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
> {
> -        __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
> +        rte_atomic_fetch_and_explicit(addr, ~(1UL << nr),
> +                        rte_memory_order_relaxed);
> }
>
> static inline uint64_t
> @@ -1020,7 +1023,8 @@ hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
> {
>          uint64_t mask = (1UL << nr);
>
> -        return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
> +        return rte_atomic_fetch_and_explicit(addr,
> +                ~mask, rte_memory_order_relaxed) & mask;
> }
>
> int
> diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
> index 156fb905f990..51d17ee8a726 100644
> --- a/drivers/net/hns3/hns3_ethdev_vf.c
> +++ b/drivers/net/hns3/hns3_ethdev_vf.c
> @@ -478,7 +478,7 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
>           * MTU value issued by hns3 VF PMD must be less than or equal to
>           * PF's MTU.
>           */
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
>                  hns3_err(hw, "Failed to set mtu during resetting");
>                  return -EIO;
>          }
> @@ -546,7 +546,7 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
>                  rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
>                  hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
>                  hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
> -                __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
>                  val = hns3_read_dev(hw, HNS3_VF_RST_ING);
>                  hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
>                  val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
> @@ -618,8 +618,8 @@ hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
>          struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
>
>          if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
> -                __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
> -                                          __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
> +                rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap,
> +                        &exp, val, rte_memory_order_acquire, rte_memory_order_acquire);
> }
>
> static void
> @@ -633,8 +633,8 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
>          uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
>          struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
>
> -        __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
> -                         __ATOMIC_RELEASE);
> +        rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
> +                        rte_memory_order_release);
>
>          (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false,
>                                  NULL, 0);
> @@ -649,7 +649,7 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
>                   * mailbox from PF driver to get this capability.
>                   */
>                  hns3_dev_handle_mbx_msg(hw);
> -                if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
> +                if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
>                      HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
>                          break;
>                  remain_ms--;
> @@ -660,10 +660,10 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
>           * state: unknown (means pf not ack), not_supported, supported.
>           * Here config it as 'not_supported' when it's 'unknown' state.
>           */
> -        __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
> -                                  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
> +        rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp,
> +                val, rte_memory_order_acquire, rte_memory_order_acquire);
>
> -        if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
> +        if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
>              HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
>                  hns3_info(hw, "detect PF support push link status change!");
>          } else {
> @@ -897,7 +897,7 @@ hns3vf_request_link_info(struct hns3_hw *hw)
>          bool send_req;
>          int ret;
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
> +        if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
>                  return;
>
>          send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
> @@ -933,7 +933,7 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
>           * sending request to PF kernel driver, then could update link status by
>           * process PF kernel driver's link status mailbox message.
>           */
> -        if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
> +        if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
>                  return;
>
>          if (hw->adapter_state != HNS3_NIC_STARTED)
> @@ -972,7 +972,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
>          struct hns3_hw *hw = &hns->hw;
>          int ret;
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
>                  hns3_err(hw,
>                           "vf set vlan id failed during resetting, vlan_id =%u",
>                           vlan_id);
> @@ -1032,7 +1032,7 @@ hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
>          unsigned int tmp_mask;
>          int ret = 0;
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
>                  hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
>                           mask);
>                  return -EIO;
> @@ -1222,7 +1222,7 @@ hns3vf_start_poll_job(struct rte_eth_dev *dev)
>          if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
>                  vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
>
> -        __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
>
>          hns3vf_service_handler(dev);
> }
> @@ -1234,7 +1234,7 @@ hns3vf_stop_poll_job(struct rte_eth_dev *dev)
>
>          rte_eal_alarm_cancel(hns3vf_service_handler, dev);
>
> -        __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
> }
>
> static int
> @@ -1468,10 +1468,10 @@ hns3vf_do_stop(struct hns3_adapter *hns)
>           * during reset and is required to be released after the reset is
>           * completed.
>           */
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
> +        if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
>                  hns3_dev_release_mbufs(hns);
>
> -        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
> +        if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
>                  hns3_configure_all_mac_addr(hns, true);
>                  ret = hns3_reset_all_tqps(hns);
>                  if (ret) {
> @@ -1496,7 +1496,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
>          hns3_stop_rxtx_datapath(dev);
>
>          rte_spinlock_lock(&hw->lock);
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
>                  hns3_stop_tqps(hw);
>                  hns3vf_do_stop(hns);
>                  hns3_unmap_rx_interrupt(dev);
> @@ -1611,7 +1611,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev)
>          int ret;
>
>          PMD_INIT_FUNC_TRACE();
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
> +        if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
>                  return -EBUSY;
>
>          rte_spinlock_lock(&hw->lock);
> @@ -1795,7 +1795,7 @@ hns3vf_prepare_reset(struct hns3_adapter *hns)
>                  if (ret)
>                          return ret;
>          }
> -        __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
>
>          return 0;
> }
> @@ -1836,7 +1836,7 @@ hns3vf_stop_service(struct hns3_adapter *hns)
>           * from table space. Hence, for function reset software intervention is
>           * required to delete the entries.
>           */
> -        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
> +        if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
>                  hns3_configure_all_mc_mac_addr(hns, true);
>          rte_spinlock_unlock(&hw->lock);
>
> @@ -2018,10 +2018,10 @@ hns3vf_reset_service(void *param)
>           * The interrupt may have been lost. It is necessary to handle
>           * the interrupt to recover from the error.
>           */
> -        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
> +        if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
>              SCHEDULE_DEFERRED) {
> -                __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
> -                                 __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
> +                                rte_memory_order_relaxed);
>                  hns3_err(hw, "Handling interrupts in delayed tasks");
>                  hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
>                  reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
> @@ -2030,7 +2030,7 @@ hns3vf_reset_service(void *param)
>                          hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
>                  }
>          }
> -        __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
>
>          /*
>           * Hardware reset has been notified, we now have to poll & check if
> @@ -2225,8 +2225,9 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
>
>          hw->adapter_state = HNS3_NIC_INITIALIZED;
>
> -        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
> -            SCHEDULE_PENDING) {
> +        if (rte_atomic_load_explicit(&hw->reset.schedule,
> +            rte_memory_order_relaxed) ==
> +            SCHEDULE_PENDING) {
>                  hns3_err(hw, "Reschedule reset service after dev_init");
>                  hns3_schedule_reset(hns);
>          } else {
> diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
> index c5a3e3797cbd..cb758cf3a9b7 100644
> --- a/drivers/net/hns3/hns3_intr.c
> +++ b/drivers/net/hns3/hns3_intr.c
> @@ -2402,7 +2402,8 @@ hns3_reset_init(struct hns3_hw *hw)
>          hw->reset.request = 0;
>          hw->reset.pending = 0;
>          hw->reset.resetting = 0;
> -        __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.disable_cmd, 0,
> +                        rte_memory_order_relaxed);
>          hw->reset.wait_data = rte_zmalloc("wait_data",
>                                            sizeof(struct hns3_wait_data), 0);
>          if (!hw->reset.wait_data) {
> @@ -2419,8 +2420,8 @@ hns3_schedule_reset(struct hns3_adapter *hns)
>
>          /* Reschedule the reset process after successful initialization */
>          if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
> -                __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
> -                                 __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
> +                                rte_memory_order_relaxed);
>                  return;
>          }
>
> @@ -2428,15 +2429,15 @@ hns3_schedule_reset(struct hns3_adapter *hns)
>                  return;
>
>          /* Schedule restart alarm if it is not scheduled yet */
> -        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
> -            SCHEDULE_REQUESTED)
> +        if (rte_atomic_load_explicit(&hw->reset.schedule,
> +            rte_memory_order_relaxed) == SCHEDULE_REQUESTED)
>                  return;
> -        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
> -            SCHEDULE_DEFERRED)
> +        if (rte_atomic_load_explicit(&hw->reset.schedule,
> +            rte_memory_order_relaxed) == SCHEDULE_DEFERRED)
>                  rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
>
> -        __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
> -                         __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
> +                        rte_memory_order_relaxed);
>
>          rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
> }
> @@ -2453,11 +2454,11 @@ hns3_schedule_delayed_reset(struct hns3_adapter *hns)
>                  return;
>          }
>
> -        if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
> -            SCHEDULE_NONE)
> +        if (rte_atomic_load_explicit(&hw->reset.schedule,
> +            rte_memory_order_relaxed) != SCHEDULE_NONE)
>                  return;
> -        __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
> -                         __ATOMIC_RELAXED);
> +        rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
> +                        rte_memory_order_relaxed);
>          rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
> }
>
> @@ -2633,7 +2634,8 @@ hns3_reset_err_handle(struct hns3_adapter *hns)
>           * Regardless of whether the execution is successful or not, the
>           * flow after execution must be continued.
>           */
> -        if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
> +        if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
> +            rte_memory_order_relaxed))
>                  (void)hns3_cmd_init(hw);
> reset_fail:
>          hw->reset.attempts = 0;
> @@ -2661,7 +2663,8 @@ hns3_reset_pre(struct hns3_adapter *hns)
>          int ret;
>
>          if (hw->reset.stage == RESET_STAGE_NONE) {
> -                __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hns->hw.reset.resetting, 1,
> +                                rte_memory_order_relaxed);
>                  hw->reset.stage = RESET_STAGE_DOWN;
>                  hns3_report_reset_begin(hw);
>                  ret = hw->reset.ops->stop_service(hns);
> @@ -2750,7 +2753,8 @@ hns3_reset_post(struct hns3_adapter *hns)
>                  hns3_notify_reset_ready(hw, false);
>                  hns3_clear_reset_level(hw, &hw->reset.pending);
>                  hns3_clear_reset_event(hw);
> -                __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hns->hw.reset.resetting, 0,
> +                                rte_memory_order_relaxed);
>                  hw->reset.attempts = 0;
>                  hw->reset.stats.success_cnt++;
>                  hw->reset.stage = RESET_STAGE_NONE;
> @@ -2812,7 +2816,8 @@ hns3_reset_fail_handle(struct hns3_adapter *hns)
>                          hw->reset.mbuf_deferred_free = false;
>                  }
>                  rte_spinlock_unlock(&hw->lock);
> -                __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
> +                rte_atomic_store_explicit(&hns->hw.reset.resetting, 0,
> +                                rte_memory_order_relaxed);
>                  hw->reset.stage = RESET_STAGE_NONE;
>                  hns3_clock_gettime(&tv);
>                  timersub(&tv, &hw->reset.start_time, &tv_delta);
> diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
> index f1743c195efa..7af56ff23deb 100644
> --- a/drivers/net/hns3/hns3_mbx.c
> +++ b/drivers/net/hns3/hns3_mbx.c
> @@ -59,7 +59,8 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
>
>          mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
>          while (wait_time < mbx_time_limit) {
> -                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
> +                if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
> +                    rte_memory_order_relaxed)) {
>                          hns3_err(hw, "Don't wait for mbx response because of "
>                                   "disable_cmd");
>                          return -EBUSY;
> @@ -425,7 +426,8 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
>          }
>
>          while (!hns3_cmd_crq_empty(hw)) {
> -                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
> +                if (rte_atomic_load_explicit(&hw->reset.disable_cmd,
> +                    rte_memory_order_relaxed)) {
>                          rte_spinlock_unlock(&hw->cmq.crq.lock);
>                          return;
>                  }
> diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
> index 556f1941c6b2..8ee97a7c598a 100644
> --- a/drivers/net/hns3/hns3_mp.c
> +++ b/drivers/net/hns3/hns3_mp.c
> @@ -151,7 +151,8 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum hns3_mp_req_type type)
>          int i;
>
>          if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
> -            __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
> +            rte_atomic_load_explicit(&hw->secondary_cnt,
> +                rte_memory_order_relaxed) == 0)
>                  return;
>
>          if (!mp_req_type_is_valid(type)) {
> @@ -277,7 +278,8 @@ hns3_mp_init(struct rte_eth_dev *dev)
>                                  ret);
>                          return ret;
>                  }
> -                __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
> +                rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1,
> +                                rte_memory_order_relaxed);
>          } else {
>                  ret = hns3_mp_init_primary();
>                  if (ret) {
> @@ -297,7 +299,8 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
>          struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>
>          if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> -                __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
> +                rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1,
> +                                rte_memory_order_relaxed);
>
>          process_data.eth_dev_cnt--;
>          if (process_data.eth_dev_cnt == 0) {
> diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
> index 09b7e90c7000..bb600475e91e 100644
> --- a/drivers/net/hns3/hns3_rxtx.c
> +++ b/drivers/net/hns3/hns3_rxtx.c
> @@ -4465,7 +4465,8 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
>          struct hns3_adapter *hns = eth_dev->data->dev_private;
>
>          if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
> -            __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
> +            rte_atomic_load_explicit(&hns->hw.reset.resetting,
> +                rte_memory_order_relaxed) == 0) {
>                  eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
>                  eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
>                  eth_dev->tx_pkt_burst = hw->set_link_down ?
> @@ -4531,7 +4532,8 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
>
>          rte_spinlock_lock(&hw->lock);
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed)) {
>                  hns3_err(hw, "fail to start Rx queue during resetting.");
>                  rte_spinlock_unlock(&hw->lock);
>                  return -EIO;
> @@ -4587,7 +4589,8 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
>
>          rte_spinlock_lock(&hw->lock);
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed)) {
>                  hns3_err(hw, "fail to stop Rx queue during resetting.");
>                  rte_spinlock_unlock(&hw->lock);
>                  return -EIO;
> @@ -4616,7 +4619,8 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
>
>          rte_spinlock_lock(&hw->lock);
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed)) {
>                  hns3_err(hw, "fail to start Tx queue during resetting.");
>                  rte_spinlock_unlock(&hw->lock);
>                  return -EIO;
> @@ -4649,7 +4653,8 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
>
>          rte_spinlock_lock(&hw->lock);
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed)) {
>                  hns3_err(hw, "fail to stop Tx queue during resetting.");
>                  rte_spinlock_unlock(&hw->lock);
>                  return -EIO;
> diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
> index d9691640140b..656db9b170b2 100644
> --- a/drivers/net/hns3/hns3_tm.c
> +++ b/drivers/net/hns3/hns3_tm.c
> @@ -1051,7 +1051,8 @@ hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
>          if (error == NULL)
>                  return -EINVAL;
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed)) {
>                  error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
>                  error->message = "device is resetting";
>                  /* don't goto fail_clear, user may try later */
> @@ -1141,7 +1142,8 @@ hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
>          if (error == NULL)
>                  return -EINVAL;
>
> -        if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
> +        if (rte_atomic_load_explicit(&hw->reset.resetting,
> +            rte_memory_order_relaxed)) {
>                  error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
>                  error->message = "device is resetting";
>                  return -EBUSY;
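The one call site above where the mapping is not purely mechanical is the
compare-and-swap in hns3_ethdev_vf.c: the __atomic_compare_exchange builtin
takes the desired value by pointer plus a weak flag, whereas
rte_atomic_compare_exchange_strong_explicit takes the desired value directly
and encodes strong vs. weak in its name. A minimal sketch of the two call
shapes (toy code; uint16_t matches the exp/val locals in the patch, the
RTE_ATOMIC() qualifier is an assumption):

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* toy stand-in for vf->pf_push_lsc_cap */
    static RTE_ATOMIC(uint16_t) pf_push_lsc_cap;

    static void
    cas_example(uint16_t exp, uint16_t val)
    {
            /*
             * was: __atomic_compare_exchange(&pf_push_lsc_cap, &exp, &val, 0,
             *                                __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
             */
            rte_atomic_compare_exchange_strong_explicit(&pf_push_lsc_cap, &exp,
                    val, rte_memory_order_acquire, rte_memory_order_acquire);
    }

Note that exp is still passed by address (it is updated with the observed
value when the exchange fails), while the desired value val is now passed
by value.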