* [PATCH 01/46] net/mlx5: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 02/46] net/ixgbe: " Tyler Retzlaff
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
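Note for reviewers (not part of the commit message): below is a minimal
sketch of the conversion pattern applied throughout this patch, using a
made-up example_ctx structure. Atomically accessed variables gain the
RTE_ATOMIC() specifier, __atomic_*() builtins become
rte_atomic_*_explicit(), __ATOMIC_* orders become rte_memory_order_*, and
the "weak" argument of __atomic_compare_exchange_n() (always false in this
driver) is dropped in favor of the strong variant. When stdatomic is not
enabled the rte_atomic_* macros fall back to the same __atomic builtins, so
no functional change is intended.

#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

struct example_ctx {
	RTE_ATOMIC(uint32_t) refcnt;	/* was: uint32_t refcnt; */
};

/* was: __atomic_fetch_add(&ctx->refcnt, 1, __ATOMIC_RELAXED); */
static inline void
example_ctx_get(struct example_ctx *ctx)
{
	rte_atomic_fetch_add_explicit(&ctx->refcnt, 1,
				      rte_memory_order_relaxed);
}

/* was: __atomic_fetch_sub(&ctx->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0 */
static inline bool
example_ctx_put(struct example_ctx *ctx)
{
	return rte_atomic_fetch_sub_explicit(&ctx->refcnt, 1,
					     rte_memory_order_relaxed) - 1 == 0;
}

/*
 * was: __atomic_compare_exchange_n(&ctx->refcnt, &expected, 1, false,
 *                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
 * The builtin's "weak" flag has no counterpart here; the strong variant
 * is used instead.
 */
static inline bool
example_ctx_claim(struct example_ctx *ctx)
{
	uint32_t expected = 0;

	return rte_atomic_compare_exchange_strong_explicit(&ctx->refcnt,
			&expected, 1, rte_memory_order_acquire,
			rte_memory_order_relaxed);
}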
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 ++-
drivers/net/mlx5/mlx5.c | 9 ++-
drivers/net/mlx5/mlx5.h | 66 ++++++++---------
drivers/net/mlx5/mlx5_flow.c | 37 +++++-----
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 ++++++-----
drivers/net/mlx5/mlx5_flow_dv.c | 126 ++++++++++++++++----------------
drivers/net/mlx5/mlx5_flow_flex.c | 14 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++++++++-------
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++++----
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++++----
drivers/net/mlx5/mlx5_hws_cnt.c | 71 +++++++++---------
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +--
drivers/net/mlx5/mlx5_rx.h | 14 ++--
drivers/net/mlx5/mlx5_rxq.c | 30 ++++----
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 ++---
drivers/net/mlx5/mlx5_txpp.c | 84 ++++++++++-----------
drivers/net/mlx5/mlx5_txq.c | 12 +--
drivers/net/mlx5/mlx5_utils.c | 10 +--
drivers/net/mlx5/mlx5_utils.h | 4 +-
22 files changed, 351 insertions(+), 345 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 40ea9d2..70bba6c 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -1918,9 +1918,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
return -ENOTSUP;
}
/* Check there is no concurrent mapping in other thread. */
- if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
- base, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
+ base,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
return 0;
}
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index b54f3cc..63da8f4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1117,7 +1117,7 @@
return 0;
}
/* Only need to check refcnt, 0 after "sh" is allocated. */
- if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+ if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
priv->lb_used = 1;
return 0;
@@ -1163,7 +1163,7 @@
claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
sh->self_lb.ibv_cq = NULL;
}
- __atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
return -rte_errno;
#else
RTE_SET_USED(dev);
@@ -1186,8 +1186,9 @@
if (!priv->lb_used)
return;
- MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
- if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
+ MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
+ if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
if (sh->self_lb.qp) {
claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
sh->self_lb.qp = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d1a6382..2ff94db 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -855,8 +855,8 @@
ct_pool = mng->pools[idx];
for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
ct = &ct_pool->actions[i];
- val = __atomic_fetch_sub(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
MLX5_ASSERT(val == 1);
if (val > 1)
cnt++;
@@ -1082,7 +1082,8 @@
DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
return -ENOTSUP;
}
- if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
+ if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
+ rte_memory_order_relaxed) + 1 > 1)
return 0;
priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
@@ -1173,7 +1174,7 @@
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
- if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
return;
mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
mlx5_free(fp->flex.devx_fp);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0091a24..77c84b8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -378,7 +378,7 @@ struct mlx5_drop {
struct mlx5_lb_ctx {
struct ibv_qp *qp; /* QP object. */
void *ibv_cq; /* Completion queue. */
- uint16_t refcnt; /* Reference count for representors. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
/* HW steering queue job descriptor type. */
@@ -481,10 +481,10 @@ enum mlx5_counter_type {
/* Counter age parameter. */
struct mlx5_age_param {
- uint16_t state; /**< Age state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
uint16_t port_id; /**< Port id of the counter. */
uint32_t timeout:24; /**< Aging timeout in seconds. */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/**< Time in seconds since last hit (atomically accessed). */
void *context; /**< Flow counter age context. */
};
@@ -497,7 +497,7 @@ struct flow_counter_stats {
/* Shared counters information for counters. */
struct mlx5_flow_counter_shared {
union {
- uint32_t refcnt; /* Only for shared action management. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
uint32_t id; /* User counter ID for legacy sharing. */
};
};
@@ -588,7 +588,7 @@ struct mlx5_counter_stats_raw {
/* Counter global management structure. */
struct mlx5_flow_counter_mng {
- volatile uint16_t n_valid; /* Number of valid pools. */
+ volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
uint16_t last_pool_idx; /* Last used pool index */
int min_id; /* The minimum counter ID in the pools. */
int max_id; /* The maximum counter ID in the pools. */
@@ -654,7 +654,7 @@ struct mlx5_aso_sq {
struct mlx5_aso_age_action {
LIST_ENTRY(mlx5_aso_age_action) next;
void *dr_action;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
/* Following fields relevant only when action is active. */
uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
struct mlx5_age_param age_params;
@@ -688,7 +688,7 @@ struct mlx5_geneve_tlv_option_resource {
rte_be16_t option_class; /* geneve tlv opt class.*/
uint8_t option_type; /* geneve tlv opt type.*/
uint8_t length; /* geneve tlv opt length. */
- uint32_t refcnt; /* geneve tlv object reference counter */
+ RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
};
@@ -903,7 +903,7 @@ struct mlx5_flow_meter_policy {
uint16_t group;
/* The group. */
rte_spinlock_t sl;
- uint32_t ref_cnt;
+ RTE_ATOMIC(uint32_t) ref_cnt;
/* Use count. */
struct rte_flow_pattern_template *hws_item_templ;
/* Hardware steering item templates. */
@@ -1038,7 +1038,7 @@ struct mlx5_flow_meter_profile {
struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
/**< srtcm_rfc2697 struct. */
};
- uint32_t ref_cnt; /**< Use count. */
+ RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
uint32_t g_support:1; /**< If G color will be generated. */
uint32_t y_support:1; /**< If Y color will be generated. */
uint32_t initialized:1; /**< Initialized. */
@@ -1078,7 +1078,7 @@ struct mlx5_aso_mtr {
enum mlx5_aso_mtr_type type;
struct mlx5_flow_meter_info fm;
/**< Pointer to the next aso flow meter structure. */
- uint8_t state; /**< ASO flow meter state. */
+ RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
uint32_t offset;
enum rte_color init_color;
};
@@ -1124,7 +1124,7 @@ struct mlx5_flow_mtr_mng {
/* Default policy table. */
uint32_t def_policy_id;
/* Default policy id. */
- uint32_t def_policy_ref_cnt;
+ RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
/** def_policy meter use count. */
struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
/* Meter drop table. */
@@ -1197,8 +1197,8 @@ struct mlx5_txpp_wq {
/* Tx packet pacing internal timestamp. */
struct mlx5_txpp_ts {
- uint64_t ci_ts;
- uint64_t ts;
+ RTE_ATOMIC(uint64_t) ci_ts;
+ RTE_ATOMIC(uint64_t) ts;
};
/* Tx packet pacing structure. */
@@ -1221,12 +1221,12 @@ struct mlx5_dev_txpp {
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
/* Statistics counters. */
- uint64_t err_miss_int; /* Missed service interrupt. */
- uint64_t err_rearm_queue; /* Rearm Queue errors. */
- uint64_t err_clock_queue; /* Clock Queue errors. */
- uint64_t err_ts_past; /* Timestamp in the past. */
- uint64_t err_ts_future; /* Timestamp in the distant future. */
- uint64_t err_ts_order; /* Timestamp not in ascending order. */
+ RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
+ RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
+ RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
+ RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
+ RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
+ RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
};
/* Sample ID information of eCPRI flex parser structure. */
@@ -1287,16 +1287,16 @@ struct mlx5_aso_ct_action {
void *dr_action_orig;
/* General action object for reply dir. */
void *dr_action_rply;
- uint32_t refcnt; /* Action used count in device flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */
uint16_t peer; /* The only peer port index could also use this CT. */
- enum mlx5_aso_ct_state state; /* ASO CT state. */
+ RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
bool is_original; /* The direction of the DR action to be used. */
};
/* CT action object state update. */
#define MLX5_ASO_CT_UPDATE_STATE(c, s) \
- __atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+ rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -1370,7 +1370,7 @@ struct mlx5_flex_pattern_field {
/* Port flex item context. */
struct mlx5_flex_item {
struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
- uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
uint32_t mapnum; /* Number of pattern translation entries. */
struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
@@ -1383,7 +1383,7 @@ struct mlx5_flex_item {
#define MLX5_SRV6_SAMPLE_NUM 5
/* Mlx5 internal flex parser profile structure. */
struct mlx5_internal_flex_parser_profile {
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
struct mlx5_flex_item flex; /* Hold map info for modify field. */
};
@@ -1512,9 +1512,9 @@ struct mlx5_dev_ctx_shared {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
#endif
- struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
- struct mlx5_hlist *modify_cmds;
- struct mlx5_hlist *tag_table;
+ RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
+ RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
+ RTE_ATOMIC(struct mlx5_hlist *) tag_table;
struct mlx5_list *port_id_action_list; /* Port ID action list. */
struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
struct mlx5_list *sample_action_list; /* List of sample actions. */
@@ -1525,7 +1525,7 @@ struct mlx5_dev_ctx_shared {
/* SW steering counters management structure. */
void *default_miss_action; /* Default miss action. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
- struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
+ RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
/* Shared interrupt handler section. */
struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
@@ -1570,7 +1570,7 @@ struct mlx5_dev_ctx_shared {
* Caution, secondary process may rebuild the struct during port start.
*/
struct mlx5_proc_priv {
- void *hca_bar;
+ RTE_ATOMIC(void *) hca_bar;
/* Mapped HCA PCI BAR area. */
size_t uar_table_sz;
/* Size of UAR register table. */
@@ -1635,7 +1635,7 @@ struct mlx5_rxq_obj {
/* Indirection table. */
struct mlx5_ind_table_obj {
LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
union {
void *ind_table; /**< Indirection table. */
struct mlx5_devx_obj *rqt; /* DevX RQT object. */
@@ -1826,7 +1826,7 @@ enum mlx5_quota_state {
};
struct mlx5_quota {
- uint8_t state; /* object state */
+ RTE_ATOMIC(uint8_t) state; /* object state */
uint8_t mode; /* metering mode */
/**
* Keep track of application update types.
@@ -1955,7 +1955,7 @@ struct mlx5_priv {
uint32_t flex_item_map; /* Map of allocated flex item elements. */
uint32_t nb_queue; /* HW steering queue number. */
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
- uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
+ RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -2007,7 +2007,7 @@ struct mlx5_priv {
#endif
struct rte_eth_dev *shared_host; /* Host device for HW steering. */
- uint16_t shared_refcnt; /* HW steering host reference counter. */
+ RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f31fdfb..1954975 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4623,8 +4623,8 @@ struct mlx5_translated_action_handle {
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
- __atomic_fetch_add(&shared_rss->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
+ rte_memory_order_relaxed);
return idx;
default:
break;
@@ -7459,7 +7459,7 @@ struct mlx5_list_entry *
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
- __atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
mlx5_free(default_miss_ctx.queue);
}
mlx5_flow_pop_thread_workspace();
@@ -7470,10 +7470,10 @@ struct mlx5_list_entry *
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
if (rss_desc->shared_rss)
- __atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
+ rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
+ rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
@@ -7976,7 +7976,8 @@ struct rte_flow *
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
+ rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
flow_mreg_del_copy_action(dev, flow);
@@ -9456,7 +9457,7 @@ struct mlx5_flow_workspace*
{
uint32_t pools_n, us;
- pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
+ pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -9558,17 +9559,17 @@ struct mlx5_flow_workspace*
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
age_param = MLX5_CNT_TO_AGE(cnt);
- if (__atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) != AGE_CANDIDATE)
+ if (rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) != AGE_CANDIDATE)
continue;
if (cur->data[i].hits != prev->data[i].hits) {
- __atomic_store_n(&age_param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
continue;
}
- if (__atomic_fetch_add(&age_param->sec_since_last_hit,
+ if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
time_delta,
- __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
+ rte_memory_order_relaxed) + time_delta <= age_param->timeout)
continue;
/**
* Hold the lock first, or if between the
@@ -9579,10 +9580,10 @@ struct mlx5_flow_workspace*
priv = rte_eth_devices[age_param->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_TMOUT, false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_TMOUT,
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
}
@@ -11407,7 +11408,7 @@ struct tunnel_db_element_release_ctx {
{
struct tunnel_db_element_release_ctx *ctx = x;
ctx->ret = 0;
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 34b5e0f..edfa76f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1049,7 +1049,7 @@ struct mlx5_flow_tunnel {
LIST_ENTRY(mlx5_flow_tunnel) chain;
struct rte_flow_tunnel app_tunnel; /** app tunnel copy */
uint32_t tunnel_id; /** unique tunnel ID */
- uint32_t refctn;
+ RTE_ATOMIC(uint32_t) refctn;
struct rte_flow_action action;
struct rte_flow_item item;
struct mlx5_hlist *groups; /** tunnel groups */
@@ -1470,7 +1470,7 @@ struct rte_flow_pattern_template {
struct mlx5dr_match_template *mt; /* mlx5 match template. */
uint64_t item_flags; /* Item layer flags. */
uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
/*
* If true, then rule pattern should be prepended with
* represented_port pattern item.
@@ -1502,7 +1502,7 @@ struct rte_flow_actions_template {
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint16_t recom_off; /* Offset of DR IPv6 routing push remove action. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
uint8_t flex_item; /* flex item index. */
};
@@ -1855,7 +1855,7 @@ struct rte_flow_template_table {
/* Shared RSS action structure */
struct mlx5_shared_action_rss {
ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
- uint32_t refcnt; /**< Atomically accessed refcnt. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
struct mlx5_ind_table_obj *ind_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index ab9eb21..a94b228 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -619,7 +619,7 @@
uint8_t *u8addr;
uint8_t hit;
- if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
AGE_CANDIDATE)
continue;
byte = 63 - (j / 8);
@@ -627,13 +627,13 @@
u8addr = (uint8_t *)addr;
hit = (u8addr[byte] >> offset) & 0x1;
if (hit) {
- __atomic_store_n(&ap->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
} else {
struct mlx5_priv *priv;
- __atomic_fetch_add(&ap->sec_since_last_hit,
- diff, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
+ diff, rte_memory_order_relaxed);
/* If timeout passed add to aged-out list. */
if (ap->sec_since_last_hit <= ap->timeout)
continue;
@@ -641,12 +641,11 @@
rte_eth_devices[ap->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&ap->state,
+ if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
&expected,
AGE_TMOUT,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
LIST_INSERT_HEAD(&age_info->aged_aso,
act, next);
MLX5_AGE_SET(age_info,
@@ -946,10 +945,10 @@
for (i = 0; i < n; ++i) {
aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
MLX5_ASSERT(aso_mtr);
- verdict = __atomic_compare_exchange_n(&aso_mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
}
sq->tail += n;
@@ -1005,10 +1004,10 @@
mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,
MLX5_INDIRECT_ACTION_IDX_GET(job->action));
MLX5_ASSERT(mtr);
- verdict = __atomic_compare_exchange_n(&mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
}
@@ -1103,7 +1102,7 @@
struct mlx5_aso_sq *sq;
struct mlx5_dev_ctx_shared *sh = priv->sh;
uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
- uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+ uint8_t state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
poll_cq_t poll_mtr_cq =
is_tmpl_api ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
@@ -1112,7 +1111,7 @@
sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);
do {
poll_mtr_cq(priv, sq);
- if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
ASO_METER_READY)
return 0;
/* Waiting for CQE ready. */
@@ -1411,7 +1410,7 @@
uint16_t wqe_idx;
struct mlx5_aso_ct_pool *pool;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_FREE) {
DRV_LOG(ERR, "Fail: No context to query");
@@ -1620,12 +1619,12 @@
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
else
sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
/* Waiting for CQE ready, consider should block or sleep. */
@@ -1791,7 +1790,7 @@
bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (sh->config.dv_flow_en == 2)
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
@@ -1807,7 +1806,7 @@
}
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_READY ||
state == ASO_CONNTRACK_QUERY)
return 0;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d434c67..f9c56af 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -313,7 +313,7 @@ enum mlx5_l3_tunnel_detection {
}
static inline struct mlx5_hlist *
-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
const char *name, uint32_t size, bool direct_key,
bool lcores_share, void *ctx,
mlx5_list_create_cb cb_create,
@@ -327,7 +327,7 @@ enum mlx5_l3_tunnel_detection {
struct mlx5_hlist *expected = NULL;
char s[MLX5_NAME_SIZE];
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
if (likely(hl))
return hl;
snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
@@ -341,11 +341,11 @@ enum mlx5_l3_tunnel_detection {
"cannot allocate resource memory");
return NULL;
}
- if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_hlist_destroy(hl);
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
}
return hl;
}
@@ -6139,8 +6139,8 @@ struct mlx5_list_entry *
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
- struct mlx5_indexed_pool *ipool = __atomic_load_n
- (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+ struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
+ (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
if (!ipool) {
struct mlx5_indexed_pool *expected = NULL;
@@ -6165,13 +6165,13 @@ struct mlx5_list_entry *
ipool = mlx5_ipool_create(&cfg);
if (!ipool)
return NULL;
- if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
- &expected, ipool, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
+ &expected, ipool,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_ipool_destroy(ipool);
- ipool = __atomic_load_n(&sh->mdh_ipools[index],
- __ATOMIC_SEQ_CST);
+ ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
+ rte_memory_order_seq_cst);
}
}
return ipool;
@@ -6992,9 +6992,9 @@ struct mlx5_list_entry *
age_info = GET_PORT_AGE_INFO(priv);
age_param = flow_dv_counter_idx_get_age(dev, counter);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since counter may still in process.
@@ -7002,7 +7002,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -7038,8 +7038,8 @@ struct mlx5_list_entry *
* indirect action API, shared info is 1 before the reduction,
* so this condition is failed and function doesn't return here.
*/
- if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
- __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
+ rte_memory_order_relaxed) - 1)
return;
}
cnt->pool = pool;
@@ -10203,8 +10203,8 @@ struct mlx5_list_entry *
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
} else {
ret = rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -10243,8 +10243,8 @@ struct mlx5_list_entry *
geneve_opt_resource->option_class = geneve_opt_v->option_class;
geneve_opt_resource->option_type = geneve_opt_v->option_type;
geneve_opt_resource->length = geneve_opt_v->option_len;
- __atomic_store_n(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
}
exit:
rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
@@ -12192,8 +12192,8 @@ struct mlx5_list_entry *
(void *)(uintptr_t)(dev_flow->flow_idx);
age_param->timeout = age->timeout;
age_param->port_id = dev->data->port_id;
- __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
return counter;
}
@@ -13241,9 +13241,9 @@ struct mlx5_list_entry *
uint16_t expected = AGE_CANDIDATE;
age_info = GET_PORT_AGE_INFO(priv);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since age action may still in process.
@@ -13251,7 +13251,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
LIST_REMOVE(age, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -13275,7 +13275,7 @@ struct mlx5_list_entry *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
- uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
flow_dv_aso_age_remove_from_age(dev, age);
@@ -13451,7 +13451,7 @@ struct mlx5_list_entry *
return 0; /* 0 is an error. */
}
}
- __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
return pool->index | ((age_free->offset + 1) << 16);
}
@@ -13481,10 +13481,10 @@ struct mlx5_list_entry *
aso_age->age_params.context = context;
aso_age->age_params.timeout = timeout;
aso_age->age_params.port_id = dev->data->port_id;
- __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
- __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
static void
@@ -13666,12 +13666,12 @@ struct mlx5_list_entry *
uint32_t ret;
struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
/* Cannot release when CT is in the ASO SQ. */
if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
return -1;
- ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -13861,7 +13861,7 @@ struct mlx5_list_entry *
pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
/* 0: inactive, 1: created, 2+: used by flows. */
- __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -14813,8 +14813,8 @@ struct mlx5_list_entry *
age_act = flow_aso_age_get_by_idx(dev, owner_idx);
if (flow->age == 0) {
flow->age = owner_idx;
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
+ rte_memory_order_relaxed);
}
age_act_pos = actions_n++;
action_flags |= MLX5_FLOW_ACTION_AGE;
@@ -14851,9 +14851,9 @@ struct mlx5_list_entry *
} else {
if (flow->counter == 0) {
flow->counter = owner_idx;
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&cnt_act->shared_info.refcnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
}
/* Save information first, will apply later. */
action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -15185,8 +15185,8 @@ struct mlx5_list_entry *
flow->indirect_type =
MLX5_INDIRECT_ACTION_TYPE_CT;
flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
}
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
@@ -15855,7 +15855,7 @@ struct mlx5_list_entry *
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
- __atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
}
void
@@ -16038,8 +16038,8 @@ struct mlx5_list_entry *
sh->geneve_tlv_option_resource;
rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
if (geneve_opt_resource) {
- if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED) - 1)) {
+ if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
claim_zero(mlx5_devx_cmd_destroy
(geneve_opt_resource->obj));
mlx5_free(sh->geneve_tlv_option_resource);
@@ -16448,7 +16448,7 @@ struct mlx5_list_entry *
/* Update queue with indirect table queue memoyr. */
origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
- __atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
@@ -16494,9 +16494,9 @@ struct mlx5_list_entry *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"invalid shared action");
- if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
- 0, 0, __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
+ 0, rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -16632,10 +16632,10 @@ struct rte_flow_action_handle *
return __flow_dv_action_rss_release(dev, idx, error);
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
- if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
- &no_flow_refcnt, 1, false,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
+ &no_flow_refcnt, 1,
+ rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -17595,13 +17595,13 @@ struct rte_flow_action_handle *
case MLX5_INDIRECT_ACTION_TYPE_AGE:
age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
resp = data;
- resp->aged = __atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) == AGE_TMOUT ?
+ resp->aged = rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) == AGE_TMOUT ?
1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_dv_query_count(dev, idx, data, error);
@@ -17678,12 +17678,12 @@ struct rte_flow_action_handle *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
}
- resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
AGE_TMOUT ? 1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 4ae03a2..8a02247 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -86,7 +86,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map |= 1u << idx;
}
}
@@ -107,7 +107,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map &= ~(1u << idx);
rte_spinlock_unlock(&priv->flex_item_sl);
}
@@ -379,7 +379,7 @@
return ret;
}
if (acquire)
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return ret;
}
@@ -414,7 +414,7 @@
rte_errno = -EINVAL;
return -EINVAL;
}
- __atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
return 0;
}
@@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *
}
flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
/* Mark initialized flex item valid. */
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return (struct rte_flow_item_flex_handle *)flex;
error:
@@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
"invalid flex item handle value");
}
- if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
+ rte_memory_order_acquire, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&priv->flex_item_sl);
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 35f1ed7..7f8d234 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -715,7 +715,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
}
if (acts->mark)
- if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed) - 1))
flow_hw_rxq_flag_set(dev, false);
if (acts->jump) {
@@ -2298,7 +2299,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
goto err;
acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
- __atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
@@ -4537,8 +4539,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
uint8_t i;
for (i = 0; i < nb_action_templates; i++) {
- uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED);
+ uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (refcnt <= 1) {
rte_flow_error_set(error, EINVAL,
@@ -4576,8 +4578,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
while (i--) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_sub_fetch(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
return rte_errno;
}
@@ -4748,8 +4750,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
- ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED) + 1;
+ ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (ret <= 1) {
rte_errno = EINVAL;
goto it_error;
@@ -4800,14 +4802,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
for (i = 0; i < nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_fetch_sub(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
i = nb_item_templates;
it_error:
while (i--)
- __atomic_fetch_sub(&item_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
error:
err = rte_errno;
if (tbl) {
@@ -5039,12 +5041,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
LIST_REMOVE(table, next);
for (i = 0; i < table->nb_item_templates; i++)
- __atomic_fetch_sub(&table->its[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
+ 1, rte_memory_order_relaxed);
for (i = 0; i < table->nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
- __atomic_fetch_sub(&table->ats[i].action_template->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
+ 1, rte_memory_order_relaxed);
}
flow_hw_destroy_table_multi_pattern_ctx(table);
if (table->matcher_info[0].matcher)
@@ -7287,7 +7289,7 @@ enum mlx5_hw_indirect_list_relative_position {
if (!at->tmpl)
goto error;
at->action_flags = action_flags;
- __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
return at;
error:
@@ -7323,7 +7325,7 @@ enum mlx5_hw_indirect_list_relative_position {
uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Action template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -7897,7 +7899,7 @@ enum mlx5_hw_indirect_list_relative_position {
break;
}
}
- __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
rte_errno = pattern_template_validate(dev, &it, 1);
if (rte_errno)
goto error;
@@ -7933,7 +7935,7 @@ enum mlx5_hw_indirect_list_relative_position {
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Item template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -10513,7 +10515,8 @@ struct mlx5_list_entry *
}
dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
priv->shared_host = host_dev;
- __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
}
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
@@ -10698,7 +10701,8 @@ struct mlx5_list_entry *
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
if (priv->hw_q) {
@@ -10814,7 +10818,8 @@ struct mlx5_list_entry *
priv->hw_q = NULL;
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
mlx5_free(priv->hw_attr);
@@ -10872,8 +10877,8 @@ struct mlx5_list_entry *
NULL,
"Invalid CT destruction index");
}
- __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
+ rte_memory_order_relaxed);
mlx5_ipool_free(pool->cts, idx);
return 0;
}
@@ -11575,7 +11580,7 @@ struct mlx5_hw_q_job *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_REPORTED:
case HWS_AGE_AGED_OUT_NOT_REPORTED:
resp->aged = 1;
@@ -11595,8 +11600,8 @@ struct mlx5_hw_q_job *
}
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 4045c4c..f8eff60 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -2055,9 +2055,9 @@ struct mlx5_flow_meter_policy *
NULL, "Meter profile id not valid.");
/* Meter policy must exist. */
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
if (!priv->sh->config.dv_esw_en)
domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -2137,7 +2137,7 @@ struct mlx5_flow_meter_policy *
fm->is_enable = params->meter_enable;
fm->shared = !!shared;
fm->color_aware = !!params->use_prev_mtr_color;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
fm->def_policy = 1;
fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
@@ -2166,7 +2166,7 @@ struct mlx5_flow_meter_policy *
}
fm->active_state = params->meter_enable;
if (mtr_policy)
- __atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
error:
mlx5_flow_destroy_mtr_tbls(dev, fm);
@@ -2271,8 +2271,8 @@ struct mlx5_flow_meter_policy *
NULL, "Failed to create devx meter.");
}
fm->active_state = params->meter_enable;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
- __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
}
#endif
@@ -2295,7 +2295,7 @@ struct mlx5_flow_meter_policy *
if (fmp == NULL)
return -1;
/* Update dependencies. */
- __atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
fm->profile = NULL;
/* Remove from list. */
if (!priv->sh->meter_aso_en) {
@@ -2313,15 +2313,15 @@ struct mlx5_flow_meter_policy *
}
mlx5_flow_destroy_mtr_tbls(dev, fm);
if (fm->def_policy)
- __atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
+ 1, rte_memory_order_relaxed);
if (priv->sh->meter_aso_en) {
if (!fm->def_policy) {
mtr_policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
if (mtr_policy)
- __atomic_fetch_sub(&mtr_policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
fm->policy_id = 0;
}
fm->def_policy = 0;
@@ -2424,13 +2424,13 @@ struct mlx5_flow_meter_policy *
RTE_MTR_ERROR_TYPE_UNSPECIFIED,
NULL, "Meter object is being used.");
/* Destroy the meter profile. */
- __atomic_fetch_sub(&fm->profile->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
+ 1, rte_memory_order_relaxed);
/* Destroy the meter policy. */
policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
- __atomic_fetch_sub(&policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
index 14a2a8b..6ad0e8a 100644
--- a/drivers/net/mlx5/mlx5_flow_quota.c
+++ b/drivers/net/mlx5/mlx5_flow_quota.c
@@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
struct mlx5_quota *quota_obj =
sq->elts[(sq->tail + i) & mask].quota_obj;
- __atomic_compare_exchange_n(&quota_obj->state, &state,
- MLX5_QUOTA_STATE_READY, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
+ MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
}
}
@@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
rte_spinlock_lock(&sq->sqsl);
mlx5_quota_cmd_completion_handle(sq);
rte_spinlock_unlock(&sq->sqsl);
- if (__atomic_load_n(&quota_obj->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
MLX5_QUOTA_STATE_READY)
return 0;
} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
{
uint8_t state = MLX5_QUOTA_STATE_READY;
- bool verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ bool verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict)
return rte_flow_error_set(error, EBUSY,
@@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
async_job ? async_job : &sync_job, push, NULL);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
async_job ? async_job : &sync_job, push,
(void *)(uintptr_t)update->conf);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -593,9 +593,9 @@ struct rte_flow_action_handle *
NULL, "quota: failed to allocate quota object");
return NULL;
}
- verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: new quota object has invalid state");
@@ -616,8 +616,8 @@ struct rte_flow_action_handle *
(void *)(uintptr_t)conf);
if (ret) {
mlx5_ipool_free(qctx->quota_ipool, id);
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
+ rte_memory_order_relaxed);
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: WR failure");
return 0;
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index c31f2f3..1b625e0 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -149,7 +149,7 @@
}
if (param->timeout == 0)
continue;
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_NOT_REPORTED:
case HWS_AGE_AGED_OUT_REPORTED:
/* Already aged-out, no action is needed. */
@@ -171,8 +171,8 @@
hits = rte_be_to_cpu_64(stats[i].hits);
if (param->nb_cnts == 1) {
if (hits != param->accumulator_last_hits) {
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
param->accumulator_last_hits = hits;
continue;
}
@@ -184,8 +184,8 @@
param->accumulator_cnt = 0;
if (param->accumulator_last_hits !=
param->accumulator_hits) {
- __atomic_store_n(&param->sec_since_last_hit,
- 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit,
+ 0, rte_memory_order_relaxed);
param->accumulator_last_hits =
param->accumulator_hits;
param->accumulator_hits = 0;
@@ -193,9 +193,9 @@
}
param->accumulator_hits = 0;
}
- if (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,
- __ATOMIC_RELAXED) + time_delta <=
- __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
+ if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
+ rte_memory_order_relaxed) + time_delta <=
+ rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
continue;
/* Prepare the relevant ring for this AGE parameter */
if (priv->hws_strict_queue)
@@ -203,10 +203,10 @@
else
r = age_info->hw_age.aged_list;
/* Changing the state atomically and insert it into the ring. */
- if (__atomic_compare_exchange_n(&param->state, &expected1,
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
sizeof(uint32_t),
1, NULL);
@@ -221,11 +221,10 @@
*/
expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
if (ret == 0 &&
- !__atomic_compare_exchange_n(&param->state,
+ !rte_atomic_compare_exchange_strong_explicit(&param->state,
&expected2, expected1,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected2 == HWS_AGE_FREE)
mlx5_hws_age_param_free(priv,
param->own_cnt_index,
@@ -235,10 +234,10 @@
if (!priv->hws_strict_queue)
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
} else {
- __atomic_compare_exchange_n(&param->state, &expected2,
+ rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
}
}
/* The event is irrelevant in strict queue mode. */
@@ -796,8 +795,8 @@ struct mlx5_hws_cnt_pool *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
- switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
- __ATOMIC_RELAXED)) {
+ switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
+ rte_memory_order_relaxed)) {
case HWS_AGE_CANDIDATE:
case HWS_AGE_AGED_OUT_REPORTED:
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
@@ -862,8 +861,8 @@ struct mlx5_hws_cnt_pool *
"cannot allocate AGE parameter");
return 0;
}
- MLX5_ASSERT(__atomic_load_n(&param->state,
- __ATOMIC_RELAXED) == HWS_AGE_FREE);
+ MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
+ rte_memory_order_relaxed) == HWS_AGE_FREE);
if (shared) {
param->nb_cnts = 0;
param->accumulator_hits = 0;
@@ -914,9 +913,9 @@ struct mlx5_hws_cnt_pool *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
if (update_ade->timeout_valid) {
- uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
+ uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
update_ade->timeout,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (old_timeout == 0)
sec_since_last_hit_reset = true;
@@ -935,8 +934,8 @@ struct mlx5_hws_cnt_pool *
state_update = true;
}
if (sec_since_last_hit_reset)
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
if (state_update) {
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
@@ -945,13 +944,13 @@ struct mlx5_hws_cnt_pool *
* - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
* - AGED_OUT_REPORTED -> CANDIDATE
*/
- if (!__atomic_compare_exchange_n(&param->state, &expected,
+ if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
HWS_AGE_CANDIDATE_INSIDE_RING,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected == HWS_AGE_AGED_OUT_REPORTED)
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
return 0;
}
@@ -976,9 +975,9 @@ struct mlx5_hws_cnt_pool *
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
MLX5_ASSERT(param != NULL);
- if (__atomic_compare_exchange_n(&param->state, &expected,
- HWS_AGE_AGED_OUT_REPORTED, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
+ HWS_AGE_AGED_OUT_REPORTED,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
return param->context;
switch (expected) {
case HWS_AGE_FREE:
@@ -990,8 +989,8 @@ struct mlx5_hws_cnt_pool *
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
break;
case HWS_AGE_CANDIDATE_INSIDE_RING:
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
break;
case HWS_AGE_CANDIDATE:
/*
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index e005960..481442f 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -101,7 +101,7 @@ struct mlx5_hws_cnt_pool {
LIST_ENTRY(mlx5_hws_cnt_pool) next;
struct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;
struct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;
- uint32_t query_gen __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) query_gen __rte_cache_aligned;
struct mlx5_hws_cnt *pool;
struct mlx5_hws_cnt_raw_data_mng *raw_mng;
struct rte_ring *reuse_list;
@@ -134,10 +134,10 @@ enum {
/* HWS counter age parameter. */
struct mlx5_hws_age_param {
- uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/* Time in seconds since last hit (atomically accessed). */
- uint16_t state; /* AGE state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
uint64_t accumulator_last_hits;
/* Last total value of hits for comparing. */
uint64_t accumulator_hits;
@@ -426,7 +426,7 @@ struct mlx5_hws_age_param {
iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
hpool->pool[iidx].in_used = false;
hpool->pool[iidx].query_gen_when_free =
- __atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
qcache = hpool->cache->qcache[*queue];
if (unlikely(qcache == NULL)) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 2fce908..c627113 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {
/* RX queue private data. */
struct mlx5_rxq_priv {
uint16_t idx; /* Queue index. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -188,7 +188,7 @@ struct mlx5_rxq_priv {
/* External RX queue descriptor. */
struct mlx5_external_rxq {
uint32_t hw_id; /* Queue index in the Hardware. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
/* mlx5_rxq.c */
@@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
void *addr;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
MLX5_ASSERT(rep != NULL);
/* Replace MPRQ buf. */
(*rxq->mprq_bufs)[rq_idx] = rep;
@@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
void *buf_addr;
/* Increment the refcnt of the whole chunk. */
- __atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
- MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
- __ATOMIC_RELAXED) <= strd_n + 1);
+ rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
+ MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
+ rte_memory_order_relaxed) <= strd_n + 1);
buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
/*
* MLX5 device doesn't use iova but it is necessary in a
@@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
return false;
rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
- return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+ return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
}
#define LWM_COOKIE_RXQID_OFFSET 0
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dd51687..f67aaa6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -416,7 +416,7 @@
rte_errno = EINVAL;
return -rte_errno;
}
- return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1319,7 +1319,7 @@
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
for (j = 0; j != strd_n; ++j) {
shinfo = &buf->shinfos[j];
shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2037,7 +2037,7 @@ struct mlx5_rxq_priv *
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
if (rxq != NULL)
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2059,7 +2059,7 @@ struct mlx5_rxq_priv *
if (rxq == NULL)
return 0;
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2138,7 +2138,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2158,7 +2158,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2447,8 +2447,8 @@ struct mlx5_ind_table_obj *
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
== 0)) {
- __atomic_fetch_add(&ind_tbl->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
+ rte_memory_order_relaxed);
break;
}
}
@@ -2479,7 +2479,7 @@ struct mlx5_ind_table_obj *
unsigned int ret;
rte_rwlock_write_lock(&priv->ind_tbls_lock);
- ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
@@ -2561,7 +2561,7 @@ struct mlx5_ind_table_obj *
}
return ret;
}
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
return 0;
}
@@ -2626,7 +2626,7 @@ struct mlx5_ind_table_obj *
{
uint32_t refcnt;
- refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+ refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
if (refcnt <= 1)
return 0;
/*
@@ -3258,8 +3258,8 @@ struct mlx5_hrxq *
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
if (ext_rxq == NULL)
return -rte_errno;
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
if (ext_rxq->hw_id != hw_idx) {
DRV_LOG(ERR, "Port %u external RxQ index %u "
"is already mapped to HW index (requesting is "
@@ -3296,8 +3296,8 @@ struct mlx5_hrxq *
rte_errno = EINVAL;
return -rte_errno;
}
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
port_id, dpdk_idx);
rte_errno = EINVAL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f8d6728..c241a1d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1441,7 +1441,7 @@
rte_delay_us_sleep(1000 * priv->rxqs_n);
DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
if (priv->sh->config.dv_flow_en == 2) {
- if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
flow_hw_rxq_flag_set(dev, false);
} else {
mlx5_flow_stop_default(dev);
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index b1e8ea1..0e44df5 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -179,7 +179,7 @@ struct mlx5_txq_data {
__extension__
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
bool is_hairpin; /* Whether TxQ type is Hairpin. */
unsigned int max_inline_data; /* Max inline data. */
@@ -339,8 +339,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
* the service thread, data should be re-read.
*/
rte_compiler_barrier();
- ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
- ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
+ ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
+ ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
break;
@@ -350,8 +350,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts -= ts;
if (unlikely(mts >= UINT64_MAX / 2)) {
/* We have negative integer, mts is in the past. */
- __atomic_fetch_add(&sh->txpp.err_ts_past,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
+ 1, rte_memory_order_relaxed);
return -1;
}
tick = sh->txpp.tick;
@@ -360,8 +360,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts = (mts + tick - 1) / tick;
if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
/* We have mts is too distant future. */
- __atomic_fetch_add(&sh->txpp.err_ts_future,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
+ 1, rte_memory_order_relaxed);
return -1;
}
mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
@@ -1743,8 +1743,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
/* Convert the timestamp into completion to wait. */
ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
if (txq->ts_last && ts < txq->ts_last)
- __atomic_fetch_add(&txq->sh->txpp.err_ts_order,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
+ 1, rte_memory_order_relaxed);
txq->ts_last = ts;
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
sh = txq->sh;
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 5a5df2d..4e26fa2 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -538,12 +538,12 @@
uint64_t *ps;
rte_compiler_barrier();
- tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
- op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
+ tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
+ op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
rte_compiler_barrier();
- if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
+ if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
continue;
- if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
+ if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
continue;
ps = (uint64_t *)ts;
ps[0] = tm;
@@ -561,8 +561,8 @@
ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
rte_compiler_barrier();
- __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
rte_wmb();
}
@@ -590,8 +590,8 @@
*/
DRV_LOG(DEBUG,
"Clock Queue error sync lost (%X).", opcode);
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
return;
@@ -633,10 +633,10 @@
if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
return;
MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
- sh->txpp.ts.ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
- sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+ sh->txpp.ts.ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+ sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
sh->txpp.ts_p = 0;
if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -677,8 +677,8 @@
/* Check whether we have missed interrupts. */
if (cq_ci - wq->cq_ci != 1) {
DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
- __atomic_fetch_add(&sh->txpp.err_miss_int,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
+ 1, rte_memory_order_relaxed);
/* Check sync lost on wqe index. */
if (cq_ci - wq->cq_ci >=
(((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -693,8 +693,8 @@
/* Fire new requests to Rearm Queue. */
if (error) {
DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_rearm_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
}
@@ -987,8 +987,8 @@
mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
if (to.cts.op_own >> 4) {
DRV_LOG(DEBUG, "Clock Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
return -EIO;
}
@@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
return 0;
}
@@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
do {
uint64_t ts, ci;
- ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
- ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+ ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
+ ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ts,
- __ATOMIC_RELAXED) != ts)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
+ rte_memory_order_relaxed) != ts)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
- __ATOMIC_RELAXED) != ci)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
+ rte_memory_order_relaxed) != ci)
continue;
tsa->ts = ts;
tsa->ci_ts = ci;
@@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
for (i = 0; i < n_txpp; ++i)
stats[n_used + i].id = n_used + i;
stats[n_used + 0].value =
- __atomic_load_n(&sh->txpp.err_miss_int,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_miss_int,
+ rte_memory_order_relaxed);
stats[n_used + 1].value =
- __atomic_load_n(&sh->txpp.err_rearm_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
+ rte_memory_order_relaxed);
stats[n_used + 2].value =
- __atomic_load_n(&sh->txpp.err_clock_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
+ rte_memory_order_relaxed);
stats[n_used + 3].value =
- __atomic_load_n(&sh->txpp.err_ts_past,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_past,
+ rte_memory_order_relaxed);
stats[n_used + 4].value =
- __atomic_load_n(&sh->txpp.err_ts_future,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_future,
+ rte_memory_order_relaxed);
stats[n_used + 5].value =
- __atomic_load_n(&sh->txpp.err_ts_order,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_order,
+ rte_memory_order_relaxed);
stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
stats[n_used + 8].value = sh->txpp.sync_lost;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 14f55e8..da4236f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *
rte_errno = ENOMEM;
goto error;
}
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
tmpl->is_hairpin = false;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
@@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->is_hairpin = true;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
- __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
}
return ctrl;
}
@@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *
if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
return 0;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
+ if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
return 1;
if (txq_ctrl->obj) {
priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *
txq_free_elts(txq_ctrl);
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
- if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
if (!txq_ctrl->is_hairpin)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq_ctrl, next);
@@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *
if (!(*priv->txqs)[idx])
return -1;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
}
/**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index e28db2e..fc03cc0 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -203,7 +203,7 @@ struct mlx5_indexed_pool *
struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
lc = pool->cache[cidx]->lc;
- gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
+ gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
if (gc && lc != gc) {
mlx5_ipool_lock(pool);
if (lc && !(--lc->ref_cnt))
@@ -266,8 +266,8 @@ struct mlx5_indexed_pool *
pool->cache[cidx]->len = fetch_size - 1;
return pool->cache[cidx]->idx[pool->cache[cidx]->len];
}
- trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
- __ATOMIC_ACQUIRE) : 0;
+ trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
+ rte_memory_order_acquire) : 0;
trunk_n = lc ? lc->n_trunk : 0;
cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
/* Check if index reach maximum. */
@@ -332,11 +332,11 @@ struct mlx5_indexed_pool *
lc = p;
lc->ref_cnt = 1;
pool->cache[cidx]->lc = lc;
- __atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
}
/* Add trunk to trunks array. */
lc->trunks[trunk_idx] = trunk;
- __atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
/* Enqueue half of the index to global. */
ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
fetch_size = trunk->free >> 1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index f3c0d76..3146092 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {
struct mlx5_indexed_cache {
struct mlx5_indexed_trunk **trunks;
- volatile uint32_t n_trunk_valid; /* Trunks allocated. */
+ volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
uint32_t n_trunk; /* Trunk pointer array size. */
uint32_t ref_cnt;
uint32_t len;
@@ -266,7 +266,7 @@ struct mlx5_indexed_pool {
uint32_t free_list; /* Index to first free trunk. */
};
struct {
- struct mlx5_indexed_cache *gc;
+ RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
/* Global cache. */
struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
/* Local cache. */
--
1.8.3.1
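For readers new to the rte stdatomic API, the mlx5 hunks above all follow one mechanical pattern: the accessed field gains an RTE_ATOMIC() (or __rte_atomic) annotation in its declaration, and each __atomic_* builtin becomes the matching rte_atomic_*_explicit() call with rte_memory_order_* in place of __ATOMIC_*. A minimal sketch of that pattern follows; the struct and function names are hypothetical and only the API usage mirrors the patch.
#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct obj {
	RTE_ATOMIC(uint32_t) refcnt; /* was: plain uint32_t accessed via __atomic_* */
};

static inline uint32_t
obj_ref(struct obj *o)
{
	/* was: __atomic_fetch_add(&o->refcnt, 1, __ATOMIC_RELAXED) */
	return rte_atomic_fetch_add_explicit(&o->refcnt, 1,
					     rte_memory_order_relaxed);
}

static inline bool
obj_claim(struct obj *o)
{
	uint32_t expected = 0;

	/*
	 * was: __atomic_compare_exchange_n(&o->refcnt, &expected, 1, false,
	 *                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED)
	 * The explicit "weak" argument is gone; strong vs. weak is now part
	 * of the function name.
	 */
	return rte_atomic_compare_exchange_strong_explicit(&o->refcnt,
			&expected, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed);
}
Every __ATOMIC_RELAXED in the original maps to rte_memory_order_relaxed, so the conversion does not change the ordering semantics of the driver.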
* [PATCH 02/46] net/ixgbe: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 01/46] net/mlx5: use rte " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 03/46] net/iavf: " Tyler Retzlaff
` (49 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 14 ++++++++------
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 ++--
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c61c52b..e63ae1a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1130,7 +1130,7 @@ struct rte_ixgbe_xstats_name_off {
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbe_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1638,7 +1638,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbevf_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
@@ -4203,7 +4203,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
/* NOTE: review for potential ordering optimization */
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -4240,7 +4240,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -4336,7 +4336,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
ixgbe_dev_wait_setup_link_complete(dev, 0);
/* NOTE: review for potential ordering optimization */
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the IXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -4348,7 +4349,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR,
"Create link thread failed!");
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 22fc3be..8ad841e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -511,7 +511,7 @@ struct ixgbe_adapter {
*/
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
- bool link_thread_running;
+ RTE_ATOMIC(bool) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f6c17d4..e7dfd6f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1831,7 +1831,7 @@
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
@@ -2114,7 +2114,7 @@
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
--
1.8.3.1
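The one ixgbe-specific wrinkle above is the link_thread_running flag: __atomic_test_and_set() and __atomic_clear() have no direct rte_atomic_* counterparts, so the patch substitutes an exchange and a plain store on an RTE_ATOMIC(bool). A hedged sketch of that idiom, with a hypothetical flag name:
#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(bool) thread_running;

static bool
try_start_thread(void)
{
	/* was: !__atomic_test_and_set(&thread_running, __ATOMIC_SEQ_CST) */
	return !rte_atomic_exchange_explicit(&thread_running, true,
					     rte_memory_order_seq_cst);
}

static void
mark_thread_stopped(void)
{
	/* was: __atomic_clear(&thread_running, __ATOMIC_SEQ_CST) */
	rte_atomic_store_explicit(&thread_running, false,
				  rte_memory_order_seq_cst);
}
The exchange returns the previous value, preserving the test-and-set semantics, and the seq_cst ordering is carried over unchanged, in line with the in-tree "review for potential ordering optimization" notes.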
* [PATCH 03/46] net/iavf: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 01/46] net/mlx5: use rte " Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 02/46] net/ixgbe: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 04/46] net/ice: " Tyler Retzlaff
` (48 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/iavf/iavf.h | 16 ++++++++--------
drivers/net/iavf/iavf_rxtx.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +++++++-------
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 824ae4a..6b977e5 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -238,8 +238,8 @@ struct iavf_info {
struct virtchnl_vlan_caps vlan_v2_caps;
uint64_t supported_rxdid;
uint8_t *proto_xtr; /* proto xtr type for all queues */
- volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
- uint32_t pend_cmd_count;
+ volatile RTE_ATOMIC(enum virtchnl_ops) pend_cmd; /* pending command not finished */
+ RTE_ATOMIC(uint32_t) pend_cmd_count;
int cmd_retval; /* return value of the cmd response from PF */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
@@ -456,13 +456,13 @@ struct iavf_cmd_info {
_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 1, rte_memory_order_relaxed);
return !ret;
}
@@ -472,13 +472,13 @@ struct iavf_cmd_info {
_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 2, rte_memory_order_relaxed);
return !ret;
}
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 0a5246d..d1d4e9f 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2025,7 +2025,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
@@ -2152,7 +2152,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
index 83825aa..20b656e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_neon.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -273,7 +273,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1111d30..6d5969f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -41,7 +41,7 @@ struct iavf_event_element {
};
struct iavf_event_handler {
- uint32_t ndev;
+ RTE_ATOMIC(uint32_t) ndev;
rte_thread_t tid;
int fd[2];
pthread_mutex_t lock;
@@ -129,7 +129,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_add(&handler->ndev, 1, __ATOMIC_RELAXED) + 1 != 1)
+ if (rte_atomic_fetch_add_explicit(&handler->ndev, 1, rte_memory_order_relaxed) + 1 != 1)
return 0;
#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
@@ -137,7 +137,7 @@ struct iavf_event_handler {
int err = pipe(handler->fd);
#endif
if (err != 0) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -146,7 +146,7 @@ struct iavf_event_handler {
if (rte_thread_create_internal_control(&handler->tid, "iavf-event",
iavf_dev_event_handle, NULL)) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -158,7 +158,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed) - 1 != 0)
return;
int unused = pthread_cancel((pthread_t)handler->tid.opaque_id);
@@ -574,8 +574,8 @@ struct iavf_event_handler {
/* read message and it's expected one */
if (msg_opc == vf->pend_cmd) {
uint32_t cmd_count =
- __atomic_fetch_sub(&vf->pend_cmd_count,
- 1, __ATOMIC_RELAXED) - 1;
+ rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count,
+ 1, rte_memory_order_relaxed) - 1;
if (cmd_count == 0)
_notify_cmd(vf, msg_ret);
} else {
--
1.8.3.1
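Note that iavf used the __atomic_compare_exchange() form, which takes a pointer to the desired value; rte_atomic_compare_exchange_strong_explicit() takes the desired value directly, which is why the hunks above pass ops rather than &ops. A simplified sketch of the pending-command gate (types and names are reduced for illustration; the real field is a virtchnl enum):
#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct vf_state {
	RTE_ATOMIC(uint32_t) pend_cmd;       /* 0 means no command pending */
	RTE_ATOMIC(uint32_t) pend_cmd_count;
};

static bool
vf_set_pending(struct vf_state *vf, uint32_t op)
{
	uint32_t none = 0;

	/* Claim the slot only if no other command is outstanding. */
	if (!rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd,
			&none, op,
			rte_memory_order_acquire, rte_memory_order_acquire))
		return false;
	rte_atomic_store_explicit(&vf->pend_cmd_count, 1,
				  rte_memory_order_relaxed);
	return true;
}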
* [PATCH 04/46] net/ice: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (2 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 03/46] net/iavf: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 05/46] net/i40e: " Tyler Retzlaff
` (47 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/ice/base/ice_osdep.h | 4 ++--
drivers/net/ice/ice_dcf.c | 6 +++---
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 ++++----
drivers/net/ice/ice_dcf_parent.c | 16 ++++++++--------
drivers/net/ice/ice_ethdev.c | 12 ++++++------
drivers/net/ice/ice_ethdev.h | 2 +-
7 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 0e14b93..c17f1bf 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -235,7 +235,7 @@ struct ice_lock {
ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
struct ice_dma_mem *mem, u64 size)
{
- static uint64_t ice_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) ice_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -243,7 +243,7 @@ struct ice_lock {
return NULL;
snprintf(z_name, sizeof(z_name), "ice_dma_%" PRIu64,
- __atomic_fetch_add(&ice_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&ice_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
0, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7f8f516..204d4ea 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -764,7 +764,7 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
rte_spinlock_init(&hw->vc_cmd_queue_lock);
TAILQ_INIT(&hw->vc_cmd_queue);
- __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->vsi_update_thread_num, 0, rte_memory_order_relaxed);
hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
if (hw->arq_buf == NULL) {
@@ -888,8 +888,8 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
ice_dcf_dev_interrupt_handler, hw);
/* Wait for all `ice-thread` threads to exit. */
- while (__atomic_load_n(&hw->vsi_update_thread_num,
- __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(&hw->vsi_update_thread_num,
+ rte_memory_order_acquire) != 0)
rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
ice_dcf_mode_disable(hw);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index aa2a723..7726681 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -105,7 +105,7 @@ struct ice_dcf_hw {
void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen);
- int vsi_update_thread_num;
+ RTE_ATOMIC(int) vsi_update_thread_num;
uint8_t *arq_buf;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index d58ec9d..8f3a385 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -1743,7 +1743,7 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
{
return ad->hw.dcf_enabled &&
- !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+ !rte_atomic_load_explicit(&ad->dcf_state_on, rte_memory_order_relaxed);
}
/* Add UDP tunneling port */
@@ -1944,12 +1944,12 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
return -1;
}
- __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true, rte_memory_order_relaxed);
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 6e845f4..a478b69 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -123,8 +123,8 @@ struct ice_dcf_reset_event_param {
container_of(hw, struct ice_dcf_adapter, real_hw);
struct ice_adapter *parent_adapter = &adapter->parent;
- __atomic_fetch_add(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_relaxed);
rte_thread_detach(rte_thread_self());
@@ -133,8 +133,8 @@ struct ice_dcf_reset_event_param {
rte_spinlock_lock(&vsi_update_lock);
if (!ice_dcf_handle_vsi_update_event(hw)) {
- __atomic_store_n(&parent_adapter->dcf_state_on, true,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true,
+ rte_memory_order_relaxed);
ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
hw->num_vfs, hw->vf_vsi_map);
}
@@ -156,8 +156,8 @@ struct ice_dcf_reset_event_param {
free(param);
- __atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_release);
return 0;
}
@@ -269,8 +269,8 @@ struct ice_dcf_reset_event_param {
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
start_vsi_reset_thread(dcf_hw, true,
pf_msg->event_data.vf_vsi_map.vf_id);
break;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 87385d2..0f35c6a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4062,9 +4062,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = &dev->data->dev_link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
@@ -4078,9 +4078,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1a848b3..6cba643 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -621,7 +621,7 @@ struct ice_adapter {
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
/* True if DCF state of the associated PF is on */
- bool dcf_state_on;
+ RTE_ATOMIC(bool) dcf_state_on;
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
--
1.8.3.1
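The ice link-update hunks are the one place above where a plain, non-RTE_ATOMIC object is accessed atomically: struct rte_eth_link is swapped as a single 64-bit value, so the pointer is cast with the __rte_atomic qualifier before the compare-exchange. A sketch of that idiom, assuming the structure fits in 8 bytes as in current DPDK; the function name is illustrative, and a snapshot is read first for clarity (the in-tree code passes the destination itself as the expected pointer):
#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_ethdev.h>

static int
link_update_atomic(struct rte_eth_link *dst, const struct rte_eth_link *src)
{
	uint64_t old = *(const uint64_t *)dst;

	/* Publish *src only if *dst still holds the value read above. */
	if (!rte_atomic_compare_exchange_strong_explicit(
			(uint64_t __rte_atomic *)dst, &old,
			*(const uint64_t *)src,
			rte_memory_order_seq_cst, rte_memory_order_seq_cst))
		return -1;
	return 0;
}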
* [PATCH 05/46] net/i40e: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (3 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 04/46] net/ice: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 06/46] net/hns3: " Tyler Retzlaff
` (46 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/i40e/i40e_ethdev.c | 4 ++--
drivers/net/i40e/i40e_rxtx.c | 6 +++---
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 380ce1a..801cc95 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -4687,7 +4687,7 @@ enum i40e_status_code
u64 size,
u32 alignment)
{
- static uint64_t i40e_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) i40e_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -4695,7 +4695,7 @@ enum i40e_status_code
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
- __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&i40e_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 5d25ab4..155f243 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -486,7 +486,7 @@
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
@@ -745,7 +745,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
@@ -867,7 +867,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index d873e30..3a99137 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -425,7 +425,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
--
1.8.3.1
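The i40e changes are purely about the fence spelling: rte_atomic_thread_fence() now takes rte_memory_order_acquire instead of __ATOMIC_ACQUIRE, with the same intent of ordering the DD-bit load before the loads of the other descriptor words. A stand-in sketch of that ordering (the descriptor layout below is fake, not the real i40e descriptor):
#include <stdint.h>
#include <rte_atomic.h>

struct fake_rx_desc {
	uint64_t qword0;
	uint64_t qword1; /* carries the DD (descriptor done) bit */
};

#define FAKE_DD_BIT 1ULL

static int
rx_desc_done(volatile struct fake_rx_desc *rxdp, struct fake_rx_desc *out)
{
	if (!(rxdp->qword1 & FAKE_DD_BIT))
		return 0;
	/* was: rte_atomic_thread_fence(__ATOMIC_ACQUIRE) */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	/* The remaining words are read only after the acquire fence. */
	out->qword0 = rxdp->qword0;
	out->qword1 = rxdp->qword1;
	return 1;
}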
* [PATCH 06/46] net/hns3: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (4 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 05/46] net/i40e: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 07/46] net/bnxt: " Tyler Retzlaff
` (45 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/hns3/hns3_cmd.c | 18 ++++++------
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 +++++++++++------------
drivers/net/hns3/hns3_ethdev.h | 32 ++++++++++-----------
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++++++++++++++++++--------------------
drivers/net/hns3/hns3_intr.c | 36 +++++++++++------------
drivers/net/hns3/hns3_intr.h | 4 +--
drivers/net/hns3/hns3_mbx.c | 6 ++--
drivers/net/hns3/hns3_mp.c | 6 ++--
drivers/net/hns3/hns3_rxtx.c | 10 +++----
drivers/net/hns3/hns3_tm.c | 4 +--
11 files changed, 107 insertions(+), 107 deletions(-)
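Before the hns3 hunks below, the same conversion in miniature: the reset flags (resetting, disable_cmd) become RTE_ATOMIC() fields that are polled and armed with relaxed loads and stores. The structure and field widths here are guesses for illustration only; see the driver headers for the real declarations.
#include <stdint.h>
#include <rte_stdatomic.h>

struct fake_reset_state {
	RTE_ATOMIC(uint16_t) disable_cmd; /* width is a guess for illustration */
	RTE_ATOMIC(uint16_t) resetting;
};

static int
cmd_send_allowed(struct fake_reset_state *rst)
{
	/* was: !__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) */
	return rte_atomic_load_explicit(&rst->disable_cmd,
					rte_memory_order_relaxed) == 0;
}

static int
is_resetting(struct fake_reset_state *rst)
{
	/* was: __atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) */
	return rte_atomic_load_explicit(&rst->resetting,
					rte_memory_order_relaxed) != 0;
}

static void
mark_cmd_disabled(struct fake_reset_state *rst)
{
	/* was: __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED) */
	rte_atomic_store_explicit(&rst->disable_cmd, 1,
				  rte_memory_order_relaxed);
}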
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 001ff49..3c5fdbe 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,12 @@
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
uint64_t size, uint32_t alignment)
{
- static uint64_t hns3_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
- __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
@@ -198,8 +198,8 @@
hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
csq->next_to_use, csq->next_to_clean);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- __atomic_store_n(&hw->reset.disable_cmd, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+ rte_memory_order_relaxed);
hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}
@@ -313,7 +313,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (hns3_cmd_csq_done(hw))
return 0;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw,
"Don't wait for reply because of disable_cmd");
return -EBUSY;
@@ -360,7 +360,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
int retval;
uint32_t ntc;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -747,7 +747,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
ret = -EBUSY;
goto err_cmd_init;
}
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
ret = hns3_cmd_query_firmware_version_and_capability(hw);
if (ret) {
@@ -790,7 +790,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
return 0;
err_cmd_init:
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return ret;
}
@@ -819,7 +819,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (!hns->is_vf)
(void)hns3_firmware_compat_config(hw, false);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
/*
* A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 915e4eb..2f917fe 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,7 @@
* and configured directly to the hardware in the RESET_STAGE_RESTORE
* stage of the reset process.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a..327f6fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ struct hns3_intr_state {
};
static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
@@ -134,7 +134,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
hw->reset.stats.imp_cnt++;
@@ -148,7 +148,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
hw->reset.stats.global_cnt++;
@@ -1151,7 +1151,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* ensure that the hardware configuration remains unchanged before and
* after reset.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
}
@@ -1175,7 +1175,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* we will restore configurations to hardware in hns3_restore_vlan_table
* and hns3_restore_vlan_conf later.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
if (ret) {
hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5059,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5150,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5158,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
return ret;
hw->mac.link_status = RTE_ETH_LINK_DOWN;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -5184,7 +5184,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_tm_dev_stop_proc(hw);
hns3_config_mac_tnl_int(hw, false);
hns3_stop_tqps(hw);
@@ -5577,7 +5577,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
last_req = hns3_get_reset_level(hns, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -5677,7 +5677,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
}
static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
struct hns3_hw *hw = &hns->hw;
enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5737,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* any mailbox handling or command to firmware is only valid
* after hns3_cmd_init is called.
*/
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hw->reset.stats.request_cnt++;
break;
case HNS3_IMP_RESET:
@@ -5792,7 +5792,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* from table space. Hence, for function reset software intervention is
* required to delete the entries
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -5913,10 +5913,10 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5925,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Check if there is any ongoing reset in the hardware. This status can
@@ -6576,7 +6576,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e70c5ff..4c0f076 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -401,17 +401,17 @@ enum hns3_schedule {
struct hns3_reset_data {
enum hns3_reset_stage stage;
- uint16_t schedule;
+ RTE_ATOMIC(uint16_t) schedule;
/* Reset flag, covering the entire reset process */
- uint16_t resetting;
+ RTE_ATOMIC(uint16_t) resetting;
/* Used to disable sending cmds during reset */
- uint16_t disable_cmd;
+ RTE_ATOMIC(uint16_t) disable_cmd;
/* The reset level being processed */
enum hns3_reset_level level;
/* Reset level set, each bit represents a reset level */
- uint64_t pending;
+ RTE_ATOMIC(uint64_t) pending;
/* Request reset level set, from interrupt or mailbox */
- uint64_t request;
+ RTE_ATOMIC(uint64_t) request;
int attempts; /* Reset failure retry */
int retries; /* Timeout failure retry in reset_post */
/*
@@ -499,7 +499,7 @@ struct hns3_hw {
* by dev_set_link_up() or dev_start().
*/
bool set_link_down;
- unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+ RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
struct hns3_tqp_stats tqp_stats;
/* Include Mac stats | Rx stats | Tx stats */
struct hns3_mac_stats mac_stats;
@@ -844,7 +844,7 @@ struct hns3_vf {
struct hns3_adapter *adapter;
/* Whether PF support push link status change to VF */
- uint16_t pf_push_lsc_cap;
+ RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
/*
* If PF support push link status change, VF still need send request to
@@ -853,7 +853,7 @@ struct hns3_vf {
*/
uint16_t req_link_info_cnt;
- uint16_t poll_job_started; /* whether poll job is started */
+ RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
};
struct hns3_adapter {
@@ -997,32 +997,32 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
hns3_read_reg((a)->io_base, (reg))
static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t res;
- res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+ res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) & (1UL << nr)) != 0;
return res;
}
static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_or_explicit(addr, (1UL << nr), rte_memory_order_relaxed);
}
static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_and_explicit(addr, ~(1UL << nr), rte_memory_order_relaxed);
}
static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t mask = (1UL << nr);
- return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+ return rte_atomic_fetch_and_explicit(addr, ~mask, rte_memory_order_relaxed) & mask;
}
int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a..b83d5b9 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
};
static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
@@ -484,7 +484,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* MTU value issued by hns3 VF PMD must be less than or equal to
* PF's MTU.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "Failed to set mtu during resetting");
return -EIO;
}
@@ -565,7 +565,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
val = hns3_read_dev(hw, HNS3_VF_RST_ING);
hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
}
static void
@@ -650,8 +650,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
struct hns3_vf_to_pf_msg req;
- __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+ rte_memory_order_release);
hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* mailbox from PF driver to get this capability.
*/
hns3vf_handle_mbx_msg(hw);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
break;
remain_ms--;
@@ -677,10 +677,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* state: unknown (means pf not ack), not_supported, supported.
* Here config it as 'not_supported' when it's 'unknown' state.
*/
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
hns3_info(hw, "detect PF support push link status change!");
} else {
@@ -920,7 +920,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
bool send_req;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return;
send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +956,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* sending request to PF kernel driver, then could update link status by
* process PF kernel driver's link status mailbox message.
*/
- if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
return;
if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +994,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_hw *hw = &hns->hw;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw,
"vf set vlan id failed during resetting, vlan_id =%u",
vlan_id);
@@ -1059,7 +1059,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
unsigned int tmp_mask;
int ret = 0;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
mask);
return -EIO;
@@ -1252,7 +1252,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
- __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
hns3vf_service_handler(dev);
}
@@ -1264,7 +1264,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rte_eal_alarm_cancel(hns3vf_service_handler, dev);
- __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
}
static int
@@ -1500,10 +1500,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -1528,7 +1528,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1643,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1773,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -1847,7 +1847,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (ret)
return ret;
}
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return 0;
}
@@ -1888,7 +1888,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* from table space. Hence, for function reset software intervention is
* required to delete the entries.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -2030,7 +2030,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
}
static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
enum hns3_reset_level reset_level;
@@ -2070,10 +2070,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2082,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Hardware reset has been notified, we now have to poll & check if
@@ -2278,7 +2278,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30..26fa2eb 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ enum hns3_hw_err_report_type {
static int
hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
- int num, uint64_t *levels,
+ int num, RTE_ATOMIC(uint64_t) *levels,
enum hns3_hw_err_report_type err_type)
{
const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,7 @@ enum hns3_hw_err_report_type {
hw->reset.request = 0;
hw->reset.pending = 0;
hw->reset.resetting = 0;
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
if (!hw->reset.wait_data) {
@@ -2419,8 +2419,8 @@ enum hns3_hw_err_report_type {
/* Reschedule the reset process after successful initialization */
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+ rte_memory_order_relaxed);
return;
}
@@ -2428,15 +2428,15 @@ enum hns3_hw_err_report_type {
return;
/* Schedule restart alarm if it is not scheduled yet */
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_REQUESTED)
return;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED)
rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}
@@ -2453,11 +2453,11 @@ enum hns3_hw_err_report_type {
return;
}
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
SCHEDULE_NONE)
return;
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}
@@ -2537,7 +2537,7 @@ enum hns3_hw_err_report_type {
}
static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
uint64_t merge_cnt = hw->reset.stats.merge_cnt;
uint64_t tmp;
@@ -2633,7 +2633,7 @@ enum hns3_hw_err_report_type {
* Regardless of whether the execution is successful or not, the
* flow after execution must be continued.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
(void)hns3_cmd_init(hw);
reset_fail:
hw->reset.attempts = 0;
@@ -2661,7 +2661,7 @@ enum hns3_hw_err_report_type {
int ret;
if (hw->reset.stage == RESET_STAGE_NONE) {
- __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_DOWN;
hns3_report_reset_begin(hw);
ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2750,7 @@ enum hns3_hw_err_report_type {
hns3_notify_reset_ready(hw, false);
hns3_clear_reset_level(hw, &hw->reset.pending);
hns3_clear_reset_status(hw);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.attempts = 0;
hw->reset.stats.success_cnt++;
hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2812,7 @@ enum hns3_hw_err_report_type {
hw->reset.mbuf_deferred_free = false;
}
rte_spinlock_unlock(&hw->lock);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_NONE;
hns3_clock_gettime(&tv);
timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c07..1edb07d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
};
int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
void hns3_handle_error(struct hns3_adapter *hns);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc16..10c6e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,7 @@
mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
while (wait_time < mbx_time_limit) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw, "Don't wait for mbx response because of "
"disable_cmd");
return -EBUSY;
@@ -382,7 +382,7 @@
rte_spinlock_lock(&hw->cmq.crq.lock);
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
@@ -457,7 +457,7 @@
}
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f194..ba8f8ec 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,7 @@
int i;
if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
- __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+ rte_atomic_load_explicit(&hw->secondary_cnt, rte_memory_order_relaxed) == 0)
return;
if (!mp_req_type_is_valid(type)) {
@@ -277,7 +277,7 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev)
ret);
return ret;
}
- __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
} else {
ret = hns3_mp_init_primary();
if (ret) {
@@ -297,7 +297,7 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
process_data.eth_dev_cnt--;
if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 7e636a0..73a388b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4464,7 +4464,7 @@
struct hns3_adapter *hns = eth_dev->data->dev_private;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+ rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4530,7 +4530,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4586,7 +4586,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4615,7 +4615,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4648,7 +4648,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d969164..92a6685 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
/* don't goto fail_clear, user may try later */
@@ -1141,7 +1141,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
return -EBUSY;
--
1.8.3.1
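One detail of the VF patch above is worth calling out: __atomic_compare_exchange() takes the desired value by pointer plus a weak flag, while rte_atomic_compare_exchange_strong_explicit() takes the desired value directly and has no weak argument (a separate *_weak_explicit variant exists). A minimal sketch of a CAS under the new API, assuming only <rte_stdatomic.h>; the capability latch below is illustrative, not the driver's pf_push_lsc_cap handling.

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

/* Illustrative one-shot capability latch, 0 meaning "still unknown". */
static RTE_ATOMIC(uint16_t) cap_state;

/* Publish a resolved capability only if nobody resolved it first. */
static bool
cap_latch(uint16_t resolved)
{
	uint16_t expected = 0;

	/* expected is passed by pointer, desired by value. */
	return rte_atomic_compare_exchange_strong_explicit(&cap_state,
			&expected, resolved,
			rte_memory_order_acquire, rte_memory_order_acquire);
}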
* [PATCH 07/46] net/bnxt: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (5 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 06/46] net/hns3: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 08/46] net/cpfl: " Tyler Retzlaff
` (44 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
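The shape of the conversion is the same across the series: the shared object is declared with RTE_ATOMIC(), and every access names its rte_memory_order_* explicitly instead of an __ATOMIC_* constant. A minimal sketch of the relaxed-counter case this patch touches, assuming only <rte_stdatomic.h>; the counter below is illustrative, not bnxt's rx_mbuf_alloc_fail field.

#include <stdint.h>
#include <rte_stdatomic.h>

/* A statistics counter bumped from the datapath and read from control path. */
static RTE_ATOMIC(uint64_t) alloc_fail;

static inline void
count_alloc_fail(void)
{
	/* was: __atomic_fetch_add(&alloc_fail, 1, __ATOMIC_RELAXED) */
	rte_atomic_fetch_add_explicit(&alloc_fail, 1, rte_memory_order_relaxed);
}

static inline uint64_t
read_alloc_fail(void)
{
	/* was: __atomic_load_n(&alloc_fail, __ATOMIC_RELAXED) */
	return rte_atomic_load_explicit(&alloc_fail, rte_memory_order_relaxed);
}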
drivers/net/bnxt/bnxt_cpr.h | 4 ++--
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 ++++++++-----
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 ++--
5 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index c7b3480..43f06fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -107,7 +107,7 @@ struct bnxt_cp_ring_info {
/**
* Check validity of a completion ring entry. If the entry is valid, include a
- * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * C11 rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
* completion are not hoisted by the compiler or by the CPU to come before the
* loading of the "valid" field.
*
@@ -130,7 +130,7 @@ struct bnxt_cp_ring_info {
expected = !(raw_cons & ring_size);
valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
if (valid == expected) {
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
return true;
}
return false;
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 77bc382..36e0ac3 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- uint64_t rx_mbuf_alloc_fail;
+ RTE_ATOMIC(uint64_t) rx_mbuf_alloc_fail;
uint8_t need_realloc;
const struct rte_memzone *mz;
};
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 3542975..ca5d2c6 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -49,7 +49,8 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -86,7 +87,8 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -465,7 +467,8 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -1677,8 +1680,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return -ENOMEM;
}
}
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 775400f..04864e0 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -240,7 +240,7 @@
rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
/* Use acquire fence to order loads of descriptor words. */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Reload lower 64b of descriptors to make it ordered after info3_v. */
rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
((void *)&cpr->cp_desc_ring[cons + 7],
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6a6feab..479f819 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -663,7 +663,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -724,7 +724,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
--
1.8.3.1
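The bnxt_cpr.h check above keeps a standalone acquire fence rather than an acquire load: the valid bit is read first, and only when it matches does the fence stop later loads of the completion fields from being hoisted above that read. A minimal sketch of the idiom with the new fence argument; the descriptor layout is illustrative, not bnxt's.

#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

struct cmpl {
	uint32_t fields;    /* written by hardware before the valid bit */
	uint32_t valid_bit; /* toggles each time the ring wraps */
};

static inline bool
cmpl_is_valid(const volatile struct cmpl *c, uint32_t expected)
{
	if ((c->valid_bit & 1u) != expected)
		return false;
	/*
	 * Acquire fence: loads of c->fields issued after this call cannot
	 * be reordered before the load of c->valid_bit above.
	 */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	return true;
}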
* [PATCH 08/46] net/cpfl: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (6 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 07/46] net/bnxt: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 09/46] net/af_xdp: " Tyler Retzlaff
` (43 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
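Only the read and the reset of the per-queue mbuf_alloc_failed counter change here. A minimal sketch of that aggregate-and-clear pattern, assuming <rte_stdatomic.h>; the array and queue count are illustrative, not cpfl's queue layout.

#include <stdint.h>
#include <rte_stdatomic.h>

#define NB_RXQ 4

/* One relaxed failure counter per RX queue. */
static RTE_ATOMIC(uint64_t) mbuf_alloc_failed[NB_RXQ];

static uint64_t
stats_sum_alloc_failed(void)
{
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < NB_RXQ; i++)
		total += rte_atomic_load_explicit(&mbuf_alloc_failed[i],
						  rte_memory_order_relaxed);
	return total;
}

static void
stats_reset_alloc_failed(void)
{
	unsigned int i;

	for (i = 0; i < NB_RXQ; i++)
		rte_atomic_store_explicit(&mbuf_alloc_failed[i], 0,
					  rte_memory_order_relaxed);
}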
drivers/net/cpfl/cpfl_ethdev.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ef19aa1..5b47e22 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -300,8 +300,9 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed +=
+ rte_atomic_load_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -349,7 +350,8 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
* [PATCH 09/46] net/af_xdp: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (7 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 08/46] net/cpfl: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 10/46] net/octeon_ep: " Tyler Retzlaff
` (42 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
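The UMEM reference count pairs a release store when the UMEM is first published with acquire operations on the paths that later share it. A rough sketch of that pairing, assuming (as in the driver) that attach runs on a serialized control path; the structure below is illustrative, not the xsk_umem_info layout.

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

/* A buffer region shared by several queues. */
struct shared_region {
	RTE_ATOMIC(uint8_t) refcnt; /* 0 until fully configured */
	uint8_t max_users;
};

/* Publish a freshly configured region; release pairs with acquire below. */
static void
region_publish(struct shared_region *r)
{
	rte_atomic_store_explicit(&r->refcnt, 1, rte_memory_order_release);
}

/* Attach another user if the region is live and has room. */
static bool
region_try_share(struct shared_region *r)
{
	uint8_t cnt = rte_atomic_load_explicit(&r->refcnt,
					       rte_memory_order_acquire);

	if (cnt == 0 || cnt >= r->max_users)
		return false;
	rte_atomic_fetch_add_explicit(&r->refcnt, 1, rte_memory_order_acquire);
	return true;
}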
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 268a130..4833180 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -116,7 +116,7 @@ struct xsk_umem_info {
const struct rte_memzone *mz;
struct rte_mempool *mb_pool;
void *buffer;
- uint8_t refcnt;
+ RTE_ATOMIC(uint8_t) refcnt;
uint32_t max_xsks;
};
@@ -995,7 +995,8 @@ static int link_xdp_prog_with_dev(int ifindex, int fd, __u32 flags)
break;
xsk_socket__delete(rxq->xsk);
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1,
+ rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
/* free pkt_tx_queue */
@@ -1097,8 +1098,8 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
ret = -1;
goto out;
}
- if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
- __ATOMIC_ACQUIRE)) {
+ if (rte_atomic_load_explicit(&internals->rx_queues[i].umem->refcnt,
+ rte_memory_order_acquire)) {
*umem = internals->rx_queues[i].umem;
goto out;
}
@@ -1131,11 +1132,11 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
return NULL;
if (umem != NULL &&
- __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
+ rte_atomic_load_explicit(&umem->refcnt, rte_memory_order_acquire) <
umem->max_xsks) {
AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
internals->if_name, rxq->xsk_queue_idx);
- __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&umem->refcnt, 1, rte_memory_order_acquire);
}
}
@@ -1177,7 +1178,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
mb_pool->name, umem->max_xsks);
}
- __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&umem->refcnt, 1, rte_memory_order_release);
}
return umem;
@@ -1606,7 +1607,8 @@ struct msg_internal {
if (rxq->umem == NULL)
return -ENOMEM;
txq->umem = rxq->umem;
- reserve_before = __atomic_load_n(&rxq->umem->refcnt, __ATOMIC_ACQUIRE) <= 1;
+ reserve_before = rte_atomic_load_explicit(&rxq->umem->refcnt,
+ rte_memory_order_acquire) <= 1;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
@@ -1723,7 +1725,7 @@ struct msg_internal {
out_xsk:
xsk_socket__delete(rxq->xsk);
out_umem:
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1, rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
return ret;
--
1.8.3.1
* [PATCH 10/46] net/octeon_ep: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (8 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 09/46] net/af_xdp: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 11/46] net/octeontx: " Tyler Retzlaff
` (41 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
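The ISM locations are plain host memory that the device DMAs a running completion count into, so the driver only needs relaxed atomic loads plus unsigned wrap-around arithmetic to account new work. A minimal sketch of that accounting, assuming <rte_stdatomic.h>; names are illustrative, not octeon_ep's queue structures.

#include <stdint.h>
#include <rte_stdatomic.h>

struct hw_counter {
	RTE_ATOMIC(uint32_t) *ism;  /* running count written by the device */
	uint32_t prev;              /* last snapshot taken by software */
	uint32_t pending;           /* completions not yet processed */
};

/* Fold any newly reported completions into the pending count. */
static inline uint32_t
hw_counter_poll(struct hw_counter *c)
{
	uint32_t val = rte_atomic_load_explicit(c->ism,
						rte_memory_order_relaxed);

	/* Unsigned subtraction handles the counter wrapping around. */
	c->pending += val - c->prev;
	c->prev = val;
	return c->pending;
}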
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +++--
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +++--
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx2_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx_ep_common.h | 4 ++--
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 ++++--
6 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.h b/drivers/net/octeon_ep/cnxk_ep_rx.h
index ecf95cd..9422042 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.h
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.h
@@ -98,7 +98,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(droq->pkts_sent_ism, rte_memory_order_relaxed);
new_pkts = val - droq->pkts_sent_prev;
droq->pkts_sent_prev = val;
@@ -111,7 +111,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_tx.c b/drivers/net/octeon_ep/cnxk_ep_tx.c
index 233c8aa..e093140 100644
--- a/drivers/net/octeon_ep/cnxk_ep_tx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_tx.c
@@ -15,7 +15,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(iq->inst_cnt_ism, rte_memory_order_relaxed);
iq->inst_cnt += val - iq->inst_cnt_prev;
iq->inst_cnt_prev = val;
@@ -27,7 +27,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 39f357e..39b28de 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -150,10 +150,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
- (void *)iq->inst_cnt_ism, ism_addr);
+ (void *)(uintptr_t)iq->inst_cnt_ism, ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
@@ -235,10 +235,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
- oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ oq_no, (void *)(uintptr_t)droq->pkts_sent_ism, ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 25e0e5a..2aeebb4 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -300,10 +300,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
- (void *)iq->inst_cnt_ism,
+ (void *)(uintptr_t)iq->inst_cnt_ism,
(unsigned int)ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
@@ -386,10 +386,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
- (void *)droq->pkts_sent_ism,
+ (void *)(uintptr_t)droq->pkts_sent_ism,
(unsigned int)ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 7776940..73eb0c9 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -218,7 +218,7 @@ struct otx_ep_iq_config {
*/
struct otx_ep_instr_queue {
/* Location in memory updated by SDP ISM */
- uint32_t *inst_cnt_ism;
+ RTE_ATOMIC(uint32_t) *inst_cnt_ism;
struct rte_mbuf **mbuf_list;
/* Pointer to the Virtual Base addr of the input ring. */
uint8_t *base_addr;
@@ -413,7 +413,7 @@ struct otx_ep_droq {
uint8_t ism_ena;
/* Pointer to host memory copy of output packet count, set by ISM */
- uint32_t *pkts_sent_ism;
+ RTE_ATOMIC(uint32_t) *pkts_sent_ism;
uint32_t pkts_sent_prev;
/* Statistics for this DROQ. */
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 59144e0..eb2d8c1 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -475,7 +475,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
@@ -871,7 +872,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
--
1.8.3.1
* [PATCH 11/46] net/octeontx: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (9 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 10/46] net/octeon_ep: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 12/46] net/cxgbe: " Tyler Retzlaff
` (40 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
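evdev_refcnt becomes a process-global RTE_ATOMIC(uint16_t): each port takes a reference at init, and the last port to close is the one that stops the shared event device. A minimal sketch of that last-reference test; fetch_sub returns the previous value, hence the "- 1 == 0" comparison.

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

/* Users of a device shared by every port in the process. */
static RTE_ATOMIC(uint16_t) shared_dev_users;

static void
shared_dev_get(void)
{
	rte_atomic_fetch_add_explicit(&shared_dev_users, 1,
				      rte_memory_order_acquire);
}

/* Returns true only for the caller that dropped the final reference. */
static bool
shared_dev_put(void)
{
	return rte_atomic_fetch_sub_explicit(&shared_dev_users, 1,
					     rte_memory_order_acquire) - 1 == 0;
}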
drivers/net/octeontx/octeontx_ethdev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index bec54fd..64d1666 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -31,7 +31,7 @@
/* Useful in stopping/closing event device if no of
* eth ports are using it.
*/
-uint16_t evdev_refcnt;
+RTE_ATOMIC(uint16_t) evdev_refcnt;
#define OCTEONTX_QLM_MODE_SGMII 7
#define OCTEONTX_QLM_MODE_XFI 12
@@ -559,7 +559,7 @@ enum octeontx_link_speed {
return 0;
/* Stopping/closing event device once all eth ports are closed. */
- if (__atomic_fetch_sub(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&evdev_refcnt, 1, rte_memory_order_acquire) - 1 == 0) {
rte_event_dev_stop(nic->evdev);
rte_event_dev_close(nic->evdev);
}
@@ -1593,7 +1593,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
nic->pko_vfid = pko_vfid;
nic->port_id = port;
nic->evdev = evdev;
- __atomic_fetch_add(&evdev_refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&evdev_refcnt, 1, rte_memory_order_acquire);
res = octeontx_port_open(nic);
if (res < 0)
@@ -1844,7 +1844,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
}
}
- __atomic_store_n(&evdev_refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&evdev_refcnt, 0, rte_memory_order_release);
/*
* Do 1:1 links for ports & queues. All queues would be mapped to
* one port. If there are more ports than queues, then some ports
--
1.8.3.1
* [PATCH 12/46] net/cxgbe: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (10 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 11/46] net/octeontx: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:50 ` [PATCH 13/46] net/gve: " Tyler Retzlaff
` (39 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
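The cxgbe tables keep their per-entry spinlock, so the reference counts only need relaxed ordering; the lock already orders every other field of the entry. A rough sketch of that lock-plus-relaxed-refcount shape, using rte_spinlock directly and illustrative names rather than cxgbe's clip/l2t/smt entry types.

#include <stdint.h>
#include <rte_spinlock.h>
#include <rte_stdatomic.h>

/* A slot in a small hardware table. */
struct tbl_entry {
	rte_spinlock_t lock;
	RTE_ATOMIC(uint32_t) refcnt; /* 0 means the slot is free */
};

/* Lock-free scan hint: claiming or freeing still happens under the lock. */
static inline int
tbl_entry_in_use(struct tbl_entry *e)
{
	return rte_atomic_load_explicit(&e->refcnt,
					rte_memory_order_relaxed) != 0;
}

static void
tbl_entry_get(struct tbl_entry *e)
{
	rte_spinlock_lock(&e->lock);
	rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
	rte_spinlock_unlock(&e->lock);
}

/* Returns 1 when the last reference was dropped and the slot can be freed. */
static int
tbl_entry_put(struct tbl_entry *e)
{
	int last;

	rte_spinlock_lock(&e->lock);
	last = rte_atomic_fetch_sub_explicit(&e->refcnt, 1,
					     rte_memory_order_relaxed) - 1 == 0;
	rte_spinlock_unlock(&e->lock);
	return last;
}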
drivers/net/cxgbe/clip_tbl.c | 12 ++++++------
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 ++++++++++----------
drivers/net/cxgbe/cxgbe_ofld.h | 6 +++---
drivers/net/cxgbe/l2t.c | 12 ++++++------
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +++++++++++----------
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 ++++++------
drivers/net/cxgbe/smt.h | 2 +-
10 files changed, 46 insertions(+), 45 deletions(-)
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
index b709e26..8588b88 100644
--- a/drivers/net/cxgbe/clip_tbl.c
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -55,7 +55,7 @@ void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
int ret;
t4_os_lock(&ce->lock);
- if (__atomic_fetch_sub(&ce->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ce->refcnt, 1, rte_memory_order_relaxed) - 1 == 0) {
ret = clip6_release_mbox(dev, ce->addr);
if (ret)
dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
@@ -79,7 +79,7 @@ static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
unsigned int clipt_size = c->clipt_size;
for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -114,12 +114,12 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce = find_or_alloc_clipe(ctbl, lip);
if (ce) {
t4_os_lock(&ce->lock);
- if (__atomic_load_n(&ce->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&ce->refcnt, rte_memory_order_relaxed) == 0) {
rte_memcpy(ce->addr, lip, sizeof(ce->addr));
if (v6) {
ce->type = FILTER_TYPE_IPV6;
- __atomic_store_n(&ce->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ce->refcnt, 1,
+ rte_memory_order_relaxed);
ret = clip6_get_mbox(dev, lip);
if (ret)
dev_debug(adap,
@@ -129,7 +129,7 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce->type = FILTER_TYPE_IPV4;
}
} else {
- __atomic_fetch_add(&ce->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ce->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&ce->lock);
}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
index 3b2be66..439fcf6 100644
--- a/drivers/net/cxgbe/clip_tbl.h
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -13,7 +13,7 @@ struct clip_entry {
enum filter_type type; /* entry type */
u32 addr[4]; /* IPV4 or IPV6 address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct clip_tbl {
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index c479454..2ed21f2 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -418,15 +418,15 @@ void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- __atomic_fetch_sub(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
}
@@ -448,15 +448,15 @@ void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
t->tid_tab[tid] = data;
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
- __atomic_fetch_add(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index 33697c7..48a5ec0 100644
--- a/drivers/net/cxgbe/cxgbe_ofld.h
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -60,10 +60,10 @@ struct tid_info {
unsigned int atids_in_use;
/* TIDs in the TCAM */
- u32 tids_in_use;
+ RTE_ATOMIC(u32) tids_in_use;
/* TIDs in the HASH */
- u32 hash_tids_in_use;
- u32 conns_in_use;
+ RTE_ATOMIC(u32) hash_tids_in_use;
+ RTE_ATOMIC(u32) conns_in_use;
rte_spinlock_t atid_lock __rte_cache_aligned;
rte_spinlock_t ftid_lock;
diff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c
index 21f4019..ecb5fec 100644
--- a/drivers/net/cxgbe/l2t.c
+++ b/drivers/net/cxgbe/l2t.c
@@ -14,8 +14,8 @@
*/
void cxgbe_l2t_release(struct l2t_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -112,7 +112,7 @@ static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
struct l2t_entry *first_free = NULL;
for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -151,18 +151,18 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)
dev_debug(adap, "Failed to write L2T entry: %d",
ret);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
diff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h
index e4c0ebe..67d0197 100644
--- a/drivers/net/cxgbe/l2t.h
+++ b/drivers/net/cxgbe/l2t.h
@@ -30,7 +30,7 @@ struct l2t_entry {
u8 lport; /* destination port */
u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct l2t_data {
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
index 8e0da9c..79a7daa 100644
--- a/drivers/net/cxgbe/mps_tcam.c
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -76,7 +76,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- __atomic_fetch_add(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
@@ -98,7 +98,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
entry = &mpstcam->entry[ret];
memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
if (cxgbe_update_free_idx(mpstcam))
@@ -147,7 +147,7 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
* provided value is -1
*/
if (entry->state == MPS_ENTRY_UNUSED) {
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
}
@@ -165,7 +165,7 @@ static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_UNUSED;
}
@@ -190,12 +190,13 @@ int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
return -EINVAL;
}
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
entry->mask, idx, 1, pi->port_id,
false);
else
- ret = __atomic_fetch_sub(&entry->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&entry->refcnt, 1,
+ rte_memory_order_relaxed) - 1;
if (ret == 0) {
reset_mpstcam_entry(entry);
@@ -222,7 +223,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
goto out_unlock;
ret = t4_alloc_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -231,7 +232,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
@@ -253,7 +254,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) != 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) != 1)
goto out_unlock;
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -262,7 +263,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
diff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h
index 363786b..4b421f7 100644
--- a/drivers/net/cxgbe/mps_tcam.h
+++ b/drivers/net/cxgbe/mps_tcam.h
@@ -29,7 +29,7 @@ struct mps_tcam_entry {
u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
};
struct mpstcam_table {
diff --git a/drivers/net/cxgbe/smt.c b/drivers/net/cxgbe/smt.c
index 4e14a73..2f961c1 100644
--- a/drivers/net/cxgbe/smt.c
+++ b/drivers/net/cxgbe/smt.c
@@ -119,7 +119,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end, *first_free = NULL;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -156,7 +156,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_smte(s, smac);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->pfvf = pfvf;
rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
ret = write_smt_entry(dev, e);
@@ -168,9 +168,9 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
goto out_write_unlock;
}
e->state = SMT_STATE_SWITCHING;
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
@@ -195,8 +195,8 @@ struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
void cxgbe_smt_release(struct smt_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/smt.h b/drivers/net/cxgbe/smt.h
index 531810e..8b378ae 100644
--- a/drivers/net/cxgbe/smt.h
+++ b/drivers/net/cxgbe/smt.h
@@ -23,7 +23,7 @@ struct smt_entry {
u16 pfvf;
u16 hw_idx;
u8 src_mac[RTE_ETHER_ADDR_LEN];
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
rte_spinlock_t lock;
};
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
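Two things recur in the cxgbe hunks: the struct fields themselves are re-typed with RTE_ATOMIC() so the rte_atomic_* accessors type-check, and the refcount is still manipulated under the entry spinlock on the slow path while needing only relaxed ordering. A compact sketch of that combination; the entry type and helper are invented for illustration, not driver code:

#include <stdint.h>
#include <rte_spinlock.h>
#include <rte_stdatomic.h>

struct entry {
        rte_spinlock_t lock;           /* protects (re)initialisation of the entry */
        RTE_ATOMIC(uint32_t) refcnt;   /* may be read without the lock elsewhere */
};

/* Take a reference; initialise the entry when the count was zero. */
static void
entry_get(struct entry *e)
{
        rte_spinlock_lock(&e->lock);
        if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
                /* ... program hardware / fill in the entry's fields ... */
                rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
        } else {
                rte_atomic_fetch_add_explicit(&e->refcnt, 1,
                                rte_memory_order_relaxed);
        }
        rte_spinlock_unlock(&e->lock);
}

int
main(void)
{
        struct entry e = { 0 };

        rte_spinlock_init(&e.lock);
        entry_get(&e);
        return 0;
}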
* [PATCH 13/46] net/gve: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (11 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 12/46] net/cxgbe: " Tyler Retzlaff
@ 2024-03-20 20:50 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 14/46] net/memif: " Tyler Retzlaff
` (38 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:50 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/gve/base/gve_osdep.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/gve/base/gve_osdep.h b/drivers/net/gve/base/gve_osdep.h
index a3702f4..c0ee0d5 100644
--- a/drivers/net/gve/base/gve_osdep.h
+++ b/drivers/net/gve/base/gve_osdep.h
@@ -135,7 +135,7 @@ struct gve_dma_mem {
static inline void *
gve_alloc_dma_mem(struct gve_dma_mem *mem, u64 size)
{
- static uint16_t gve_dma_memzone_id;
+ static RTE_ATOMIC(uint16_t) gve_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -143,7 +143,7 @@ struct gve_dma_mem {
return NULL;
snprintf(z_name, sizeof(z_name), "gve_dma_%u",
- __atomic_fetch_add(&gve_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&gve_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG,
PAGE_SIZE);
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
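The gve change is the "unique name from a static counter" idiom: a file-scope RTE_ATOMIC(uint16_t) bumped with relaxed ordering, since nothing else is synchronised through it and only the returned value matters. A standalone sketch; the memzone reservation itself is omitted so the example needs no running EAL:

#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) dma_memzone_id;

/* Build a process-unique zone name; relaxed ordering is enough because the
 * counter does not order any other shared state. */
static void
make_zone_name(char *buf, size_t len)
{
        snprintf(buf, len, "gve_dma_%u",
                 (unsigned int)rte_atomic_fetch_add_explicit(&dma_memzone_id, 1,
                                rte_memory_order_relaxed));
}

int
main(void)
{
        char name[32];

        make_zone_name(name, sizeof(name));
        puts(name);
        return 0;
}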
* [PATCH 14/46] net/memif: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (12 preceding siblings ...)
2024-03-20 20:50 ` [PATCH 13/46] net/gve: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 15/46] net/sfc: " Tyler Retzlaff
` (37 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/memif/memif.h | 4 ++--
drivers/net/memif/rte_eth_memif.c | 50 +++++++++++++++++++--------------------
2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/net/memif/memif.h b/drivers/net/memif/memif.h
index cb72c69..ccaa218 100644
--- a/drivers/net/memif/memif.h
+++ b/drivers/net/memif/memif.h
@@ -169,9 +169,9 @@ typedef struct __rte_packed __rte_aligned(128)
uint32_t cookie; /**< MEMIF_COOKIE */
uint16_t flags; /**< flags */
#define MEMIF_RING_FLAG_MASK_INT 1 /**< disable interrupt mode */
- uint16_t head; /**< pointer to ring buffer head */
+ RTE_ATOMIC(uint16_t) head; /**< pointer to ring buffer head */
MEMIF_CACHELINE_ALIGN_MARK(cacheline1);
- uint16_t tail; /**< pointer to ring buffer tail */
+ RTE_ATOMIC(uint16_t) tail; /**< pointer to ring buffer tail */
MEMIF_CACHELINE_ALIGN_MARK(cacheline2);
memif_desc_t desc[0]; /**< buffer descriptors */
} memif_ring_t;
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 18377d9..16da22b 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -262,7 +262,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* in function eth_memif_rx for C2S queues.
*/
- cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ cur_tail = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
while (mq->last_tail != cur_tail) {
RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);
@@ -334,10 +334,10 @@ struct mp_region_msg {
if (type == MEMIF_RING_C2S) {
cur_slot = mq->last_head;
- last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire);
} else {
cur_slot = mq->last_tail;
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
}
if (cur_slot == last_slot)
@@ -473,7 +473,7 @@ struct mp_region_msg {
no_free_bufs:
if (type == MEMIF_RING_C2S) {
- __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, cur_slot, rte_memory_order_release);
mq->last_head = cur_slot;
} else {
mq->last_tail = cur_slot;
@@ -485,7 +485,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
while (n_slots--) {
@@ -493,7 +493,7 @@ struct mp_region_msg {
d0 = &ring->desc[s0];
d0->length = pmd->run.pkt_buffer_size;
}
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
}
mq->n_pkts += n_rx_pkts;
@@ -541,7 +541,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* to synchronize it between threads.
*/
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
if (cur_slot == last_slot)
goto refill;
n_slots = last_slot - cur_slot;
@@ -591,7 +591,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
if (n_slots < 32)
@@ -620,7 +620,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_tx.
*/
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
mq->n_pkts += n_rx_pkts;
@@ -668,9 +668,9 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot +
- __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
} else {
/* For S2C queues ring->tail is updated by the sender and
* this function is called in the context of sending thread.
@@ -678,8 +678,8 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
- n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
+ slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_relaxed);
+ n_free = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire) - slot;
}
uint16_t i;
@@ -792,9 +792,9 @@ struct mp_region_msg {
no_free_slots:
if (type == MEMIF_RING_C2S)
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
else
- __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, slot, rte_memory_order_release);
if (((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) &&
(rte_intr_fd_get(mq->intr_handle) >= 0)) {
@@ -882,7 +882,7 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot + mq->last_tail;
int used_slots;
@@ -942,7 +942,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_rx for C2S rings.
*/
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
/* Send interrupt, if enabled. */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
@@ -1155,8 +1155,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_c2s_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1175,8 +1175,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_s2c_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1314,8 +1314,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
@@ -1330,8 +1330,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
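The memif hunks are a single-producer/single-consumer ring protocol: each side reads its own index with a relaxed load and the peer's index with load-acquire, and publishes its own progress with store-release so the slot contents are visible before the index moves. Boiled down to the essentials, as a sketch with an invented ring layout rather than the memif descriptor format:

#include <stdint.h>
#include <rte_stdatomic.h>

#define RING_SZ   256
#define RING_MASK (RING_SZ - 1)

struct ring {
        RTE_ATOMIC(uint16_t) head;   /* written by producer, read by consumer */
        RTE_ATOMIC(uint16_t) tail;   /* written by consumer, read by producer */
        uint32_t slot[RING_SZ];
};

static int
produce(struct ring *r, uint32_t v)
{
        /* own index: relaxed; peer index: acquire to see freed slots */
        uint16_t head = rte_atomic_load_explicit(&r->head, rte_memory_order_relaxed);
        uint16_t tail = rte_atomic_load_explicit(&r->tail, rte_memory_order_acquire);

        if ((uint16_t)(head - tail) == RING_SZ)
                return -1;                      /* full */
        r->slot[head & RING_MASK] = v;          /* fill the slot first ... */
        rte_atomic_store_explicit(&r->head, head + 1,
                        rte_memory_order_release); /* ... then publish it */
        return 0;
}

static int
consume(struct ring *r, uint32_t *v)
{
        uint16_t tail = rte_atomic_load_explicit(&r->tail, rte_memory_order_relaxed);
        uint16_t head = rte_atomic_load_explicit(&r->head, rte_memory_order_acquire);

        if (tail == head)
                return -1;                      /* empty */
        *v = r->slot[tail & RING_MASK];         /* read under the acquire ... */
        rte_atomic_store_explicit(&r->tail, tail + 1,
                        rte_memory_order_release); /* ... then release the slot */
        return 0;
}

int
main(void)
{
        static struct ring r;
        uint32_t v;

        produce(&r, 42);
        return (consume(&r, &v) == 0 && v == 42) ? 0 : 1;
}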
* [PATCH 15/46] net/sfc: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (13 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 14/46] net/memif: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-21 18:11 ` Aaron Conole
2024-03-20 20:51 ` [PATCH 16/46] net/thunderx: " Tyler Retzlaff
` (36 subsequent siblings)
51 siblings, 1 reply; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/sfc/meson.build | 5 ++---
drivers/net/sfc/sfc_mae_counter.c | 30 +++++++++++++++---------------
drivers/net/sfc/sfc_repr_proxy.c | 8 ++++----
drivers/net/sfc/sfc_stats.h | 8 ++++----
4 files changed, 25 insertions(+), 26 deletions(-)
diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index 5adde68..d3603a0 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -47,9 +47,8 @@ int main(void)
__int128 a = 0;
__int128 b;
- b = __atomic_load_n(&a, __ATOMIC_RELAXED);
- __atomic_store(&b, &a, __ATOMIC_RELAXED);
- __atomic_store_n(&b, a, __ATOMIC_RELAXED);
+ b = rte_atomic_load_explicit(&a, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&b, a, rte_memory_order_relaxed);
return 0;
}
'''
diff --git a/drivers/net/sfc/sfc_mae_counter.c b/drivers/net/sfc/sfc_mae_counter.c
index ba17295..a32da84 100644
--- a/drivers/net/sfc/sfc_mae_counter.c
+++ b/drivers/net/sfc/sfc_mae_counter.c
@@ -131,8 +131,8 @@
* And it does not depend on different stores/loads in other threads.
* Paired with relaxed ordering in counter increment.
*/
- __atomic_store(&p->reset.pkts_bytes.int128,
- &p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&p->reset.pkts_bytes.int128,
+ p->value.pkts_bytes.int128, rte_memory_order_relaxed);
p->generation_count = generation_count;
p->ft_switch_hit_counter = counterp->ft_switch_hit_counter;
@@ -142,7 +142,7 @@
* at the beginning of delete operation. Release ordering is
* paired with acquire ordering on load in counter increment operation.
*/
- __atomic_store_n(&p->inuse, true, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&p->inuse, true, rte_memory_order_release);
sfc_info(sa, "enabled MAE counter 0x%x-#%u with reset pkts=%" PRIu64
" bytes=%" PRIu64, counterp->type, mae_counter.id,
@@ -189,7 +189,7 @@
* paired with acquire ordering on load in counter increment operation.
*/
p = &counters->mae_counters[mae_counter->id];
- __atomic_store_n(&p->inuse, false, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&p->inuse, false, rte_memory_order_release);
rc = efx_mae_counters_free_type(sa->nic, counter->type, 1, &unused,
mae_counter, NULL);
@@ -228,7 +228,7 @@
* Acquire ordering is paired with release ordering in counter add
* and delete operations.
*/
- __atomic_load(&p->inuse, &inuse, __ATOMIC_ACQUIRE);
+ inuse = rte_atomic_load_explicit(&p->inuse, rte_memory_order_acquire);
if (!inuse) {
/*
* Two possible cases include:
@@ -258,15 +258,15 @@
* And it does not depend on different stores/loads in other threads.
* Paired with relaxed ordering on counter reset.
*/
- __atomic_store(&p->value.pkts_bytes,
- &cnt_val.pkts_bytes, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&p->value.pkts_bytes,
+ cnt_val.pkts_bytes, rte_memory_order_relaxed);
if (p->ft_switch_hit_counter != NULL) {
uint64_t ft_switch_hit_counter;
ft_switch_hit_counter = *p->ft_switch_hit_counter + pkts;
- __atomic_store_n(p->ft_switch_hit_counter, ft_switch_hit_counter,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(p->ft_switch_hit_counter, ft_switch_hit_counter,
+ rte_memory_order_relaxed);
}
sfc_info(sa, "update MAE counter 0x%x-#%u: pkts+%" PRIu64 "=%" PRIu64
@@ -498,8 +498,8 @@
&sa->mae.counter_registry;
int32_t rc;
- while (__atomic_load_n(&counter_registry->polling.thread.run,
- __ATOMIC_ACQUIRE)) {
+ while (rte_atomic_load_explicit(&counter_registry->polling.thread.run,
+ rte_memory_order_acquire)) {
rc = sfc_mae_counter_poll_packets(sa);
if (rc == 0) {
/*
@@ -684,8 +684,8 @@
int rc;
/* Ensure that flag is set before attempting to join thread */
- __atomic_store_n(&counter_registry->polling.thread.run, false,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&counter_registry->polling.thread.run, false,
+ rte_memory_order_release);
rc = rte_thread_join(counter_registry->polling.thread.id, NULL);
if (rc != 0)
@@ -1024,8 +1024,8 @@
* And it does not depend on different stores/loads in other threads.
* Paired with relaxed ordering in counter increment.
*/
- value.pkts_bytes.int128 = __atomic_load_n(&p->value.pkts_bytes.int128,
- __ATOMIC_RELAXED);
+ value.pkts_bytes.int128 = rte_atomic_load_explicit(&p->value.pkts_bytes.int128,
+ rte_memory_order_relaxed);
data->hits_set = 1;
data->hits = value.pkts - p->reset.pkts;
diff --git a/drivers/net/sfc/sfc_repr_proxy.c b/drivers/net/sfc/sfc_repr_proxy.c
index ff13795..7275901 100644
--- a/drivers/net/sfc/sfc_repr_proxy.c
+++ b/drivers/net/sfc/sfc_repr_proxy.c
@@ -83,7 +83,7 @@
* Release ordering enforces marker set after data is populated.
* Paired with acquire ordering in sfc_repr_proxy_mbox_handle().
*/
- __atomic_store_n(&mbox->write_marker, true, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&mbox->write_marker, true, rte_memory_order_release);
/*
* Wait for the representor routine to process the request.
@@ -94,7 +94,7 @@
* Paired with release ordering in sfc_repr_proxy_mbox_handle()
* on acknowledge write.
*/
- if (__atomic_load_n(&mbox->ack, __ATOMIC_ACQUIRE))
+ if (rte_atomic_load_explicit(&mbox->ack, rte_memory_order_acquire))
break;
rte_delay_ms(1);
@@ -119,7 +119,7 @@
* Paired with release ordering in sfc_repr_proxy_mbox_send()
* on marker set.
*/
- if (!__atomic_load_n(&mbox->write_marker, __ATOMIC_ACQUIRE))
+ if (!rte_atomic_load_explicit(&mbox->write_marker, rte_memory_order_acquire))
return;
mbox->write_marker = false;
@@ -146,7 +146,7 @@
* Paired with acquire ordering in sfc_repr_proxy_mbox_send()
* on acknowledge read.
*/
- __atomic_store_n(&mbox->ack, true, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&mbox->ack, true, rte_memory_order_release);
}
static void
diff --git a/drivers/net/sfc/sfc_stats.h b/drivers/net/sfc/sfc_stats.h
index 597e14d..25c2b9e 100644
--- a/drivers/net/sfc/sfc_stats.h
+++ b/drivers/net/sfc/sfc_stats.h
@@ -51,8 +51,8 @@
* Store the result atomically to guarantee that the reader
* core sees both counter updates together.
*/
- __atomic_store_n(&st->pkts_bytes.int128, result.pkts_bytes.int128,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&st->pkts_bytes.int128, result.pkts_bytes.int128,
+ rte_memory_order_relaxed);
#else
st->pkts += pkts;
st->bytes += bytes;
@@ -66,8 +66,8 @@
sfc_pkts_bytes_get(const union sfc_pkts_bytes *st, union sfc_pkts_bytes *result)
{
#if SFC_SW_STATS_ATOMIC
- result->pkts_bytes.int128 = __atomic_load_n(&st->pkts_bytes.int128,
- __ATOMIC_RELAXED);
+ result->pkts_bytes.int128 = rte_atomic_load_explicit(&st->pkts_bytes.int128,
+ rte_memory_order_relaxed);
#else
*result = *st;
#endif
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
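Besides the paired 128-bit counter loads and stores, the sfc_mae_counter.c hunks show a publish-by-flag pattern: the control path fills in the counter record and then sets inuse with a release store, and the datapath loads inuse with acquire before trusting the record's contents. Stripped to its core, as a sketch with invented fields rather than the driver structures:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct counter {
        uint64_t reset_pkts;       /* populated before publication */
        RTE_ATOMIC(bool) inuse;    /* publication / retirement flag */
};

static struct counter cnt;

/* Control path: populate the record, then publish it with release. */
static void
counter_enable(uint64_t reset_pkts)
{
        cnt.reset_pkts = reset_pkts;
        rte_atomic_store_explicit(&cnt.inuse, true, rte_memory_order_release);
}

/* Datapath: acquire pairs with the release above, so reset_pkts is visible. */
static int
counter_update(uint64_t pkts, uint64_t *delta)
{
        if (!rte_atomic_load_explicit(&cnt.inuse, rte_memory_order_acquire))
                return -1;         /* not (or no longer) in use: drop the update */
        *delta = pkts - cnt.reset_pkts;
        return 0;
}

int
main(void)
{
        uint64_t d;

        counter_enable(10);
        return (counter_update(15, &d) == 0 && d == 5) ? 0 : 1;
}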
* Re: [PATCH 15/46] net/sfc: use rte stdatomic API
2024-03-20 20:51 ` [PATCH 15/46] net/sfc: " Tyler Retzlaff
@ 2024-03-21 18:11 ` Aaron Conole
2024-03-21 18:15 ` Tyler Retzlaff
0 siblings, 1 reply; 300+ messages in thread
From: Aaron Conole @ 2024-03-21 18:11 UTC (permalink / raw)
To: Tyler Retzlaff
Cc: dev, Mattias Rönnblom, Morten Brørup,
Abdullah Sevincer, Ajit Khaparde, Alok Prasad, Anatoly Burakov,
Andrew Rybchenko, Anoob Joseph, Bruce Richardson, Byron Marohn,
Chenbo Xia, Chengwen Feng, Ciara Loftus, Ciara Power,
Dariusz Sosnowski, David Hunt, Devendra Singh Rawat,
Erik Gabriel Carrillo, Guoyang Zhou, Harman Kalra,
Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li,
Maciej Czekaj, Matan Azrad, Maxime Coquelin, Nicolas Chautru,
Ori Kam, Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy,
Reshma Pattan, Rosen Xu, Ruifeng Wang, Rushil Gupta,
Sameh Gobriel, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang,
Yipeng Wang, Yisen Zhuang, Yuying Zhang, Ziyang Xuan
Tyler Retzlaff <roretzla@linux.microsoft.com> writes:
> Replace the use of gcc builtin __atomic_xxx intrinsics with
> corresponding rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> ---
> drivers/net/sfc/meson.build | 5 ++---
> drivers/net/sfc/sfc_mae_counter.c | 30 +++++++++++++++---------------
> drivers/net/sfc/sfc_repr_proxy.c | 8 ++++----
> drivers/net/sfc/sfc_stats.h | 8 ++++----
> 4 files changed, 25 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
> index 5adde68..d3603a0 100644
> --- a/drivers/net/sfc/meson.build
> +++ b/drivers/net/sfc/meson.build
> @@ -47,9 +47,8 @@ int main(void)
> __int128 a = 0;
> __int128 b;
>
> - b = __atomic_load_n(&a, __ATOMIC_RELAXED);
> - __atomic_store(&b, &a, __ATOMIC_RELAXED);
> - __atomic_store_n(&b, a, __ATOMIC_RELAXED);
> + b = rte_atomic_load_explicit(&a, rte_memory_order_relaxed);
> + rte_atomic_store_explicit(&b, a, rte_memory_order_relaxed);
> return 0;
> }
> '''
I think this is a case where a simple find/replace is a problem. For
example, this is a sample file that the meson build uses to determine
whether libatomic is properly installed, and it is deliberately bare-bones.

Your change is likely causing a compile error when cc.links() runs in
the meson file, and that in turn leads to the ABI error.

If the goal is to remove all the intrinsics, then a better change might
be to drop this libatomic check from here completely.

WDYT?
^ permalink raw reply [flat|nested] 300+ messages in thread
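For context, the file Aaron is pointing at is compiled on its own by cc.links(), with no DPDK include paths, purely to decide whether linking 128-bit atomics needs -latomic; it therefore cannot use the rte_ wrappers at all. Reconstructed from the removed lines above, the standalone probe is simply:

/* Bare-bones libatomic probe: intentionally uses the compiler builtins,
 * not DPDK's rte_atomic_* wrappers, because it is compiled in isolation. */
int main(void)
{
        __int128 a = 0;
        __int128 b;

        b = __atomic_load_n(&a, __ATOMIC_RELAXED);
        __atomic_store(&b, &a, __ATOMIC_RELAXED);
        __atomic_store_n(&b, a, __ATOMIC_RELAXED);
        return 0;
}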
* Re: [PATCH 15/46] net/sfc: use rte stdatomic API
2024-03-21 18:11 ` Aaron Conole
@ 2024-03-21 18:15 ` Tyler Retzlaff
0 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 18:15 UTC (permalink / raw)
To: Aaron Conole
Cc: dev, Mattias Rönnblom, Morten Brørup,
Abdullah Sevincer, Ajit Khaparde, Alok Prasad, Anatoly Burakov,
Andrew Rybchenko, Anoob Joseph, Bruce Richardson, Byron Marohn,
Chenbo Xia, Chengwen Feng, Ciara Loftus, Ciara Power,
Dariusz Sosnowski, David Hunt, Devendra Singh Rawat,
Erik Gabriel Carrillo, Guoyang Zhou, Harman Kalra,
Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li,
Maciej Czekaj, Matan Azrad, Maxime Coquelin, Nicolas Chautru,
Ori Kam, Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy,
Reshma Pattan, Rosen Xu, Ruifeng Wang, Rushil Gupta,
Sameh Gobriel, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang,
Yipeng Wang, Yisen Zhuang, Yuying Zhang, Ziyang Xuan
On Thu, Mar 21, 2024 at 02:11:00PM -0400, Aaron Conole wrote:
> Tyler Retzlaff <roretzla@linux.microsoft.com> writes:
>
> > Replace the use of gcc builtin __atomic_xxx intrinsics with
> > corresponding rte_atomic_xxx optional rte stdatomic API.
> >
> > Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > ---
> > drivers/net/sfc/meson.build | 5 ++---
> > drivers/net/sfc/sfc_mae_counter.c | 30 +++++++++++++++---------------
> > drivers/net/sfc/sfc_repr_proxy.c | 8 ++++----
> > drivers/net/sfc/sfc_stats.h | 8 ++++----
> > 4 files changed, 25 insertions(+), 26 deletions(-)
> >
> > diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
> > index 5adde68..d3603a0 100644
> > --- a/drivers/net/sfc/meson.build
> > +++ b/drivers/net/sfc/meson.build
> > @@ -47,9 +47,8 @@ int main(void)
> > __int128 a = 0;
> > __int128 b;
> >
> > - b = __atomic_load_n(&a, __ATOMIC_RELAXED);
> > - __atomic_store(&b, &a, __ATOMIC_RELAXED);
> > - __atomic_store_n(&b, a, __ATOMIC_RELAXED);
> > + b = rte_atomic_load_explicit(&a, rte_memory_order_relaxed);
> > + rte_atomic_store_explicit(&b, a, rte_memory_order_relaxed);
> > return 0;
> > }
> > '''
>
> I think this is a case where simple find/replace is a problem. For
> example, this is a sample file that the meson build uses to determine if
> libatomic is properly installed. However, it is very bare-bones.
>
> Your change is likely causing a compile error when cc.links happens in
> the meson file. That leads to the ABI error.
>
> If the goal is to remove all the intrinsics, then maybe a better change
> would be dropping this libatomic check from here completely.
>
> WDYT?
Yeah, actually it wasn't a search-and-replace mistake; it was an
unintentionally included file where I was experimenting with keeping the
test (I thought I had reverted it).

I shouldn't have added the change to the series. Thanks for pointing the
mistake out, and sorry for the noise.

Appreciate it!
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH 16/46] net/thunderx: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (14 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 15/46] net/sfc: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 17/46] net/virtio: " Tyler Retzlaff
` (35 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/thunderx/nicvf_rxtx.c | 9 +++++----
drivers/net/thunderx/nicvf_struct.h | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index defa551..2cb6a99 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -374,8 +374,8 @@
NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
(nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));
- next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
- __ATOMIC_ACQUIRE);
+ next_tail = rte_atomic_fetch_add_explicit(&rbdr->next_tail, to_fill,
+ rte_memory_order_acquire);
ltail = next_tail;
for (i = 0; i < to_fill; i++) {
struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);
@@ -385,9 +385,10 @@
ltail++;
}
- rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&rbdr->tail, next_tail,
+ rte_memory_order_relaxed);
- __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&rbdr->tail, ltail, rte_memory_order_release);
nicvf_addr_write(door, to_fill);
return to_fill;
}
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 13cf8fe..6507898 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -20,8 +20,8 @@ struct nicvf_rbdr {
struct rbdr_entry_t *desc;
nicvf_iova_addr_t phys;
uint32_t buffsz;
- uint32_t tail;
- uint32_t next_tail;
+ RTE_ATOMIC(uint32_t) tail;
+ RTE_ATOMIC(uint32_t) next_tail;
uint32_t head;
uint32_t qlen_mask;
} __rte_cache_aligned;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
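The thunderx hunk strings three of these primitives together: a producer reserves a contiguous range by atomically advancing next_tail, waits for earlier producers to publish up to its reserved start, then publishes its own range with a release store to tail. In outline, as a sketch that spins with rte_pause() where the driver calls rte_wait_until_equal_32():

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

struct rbdr_like {
        RTE_ATOMIC(uint32_t) tail;       /* published position */
        RTE_ATOMIC(uint32_t) next_tail;  /* reservation counter */
};

static void
fill(struct rbdr_like *q, uint32_t to_fill)
{
        /* 1. reserve a contiguous range of slots */
        uint32_t start = rte_atomic_fetch_add_explicit(&q->next_tail, to_fill,
                        rte_memory_order_acquire);

        /* ... write the to_fill descriptors beginning at 'start' ... */

        /* 2. wait until all earlier reservations have been published */
        while (rte_atomic_load_explicit(&q->tail, rte_memory_order_relaxed) != start)
                rte_pause();

        /* 3. publish our descriptors */
        rte_atomic_store_explicit(&q->tail, start + to_fill,
                        rte_memory_order_release);
}

int
main(void)
{
        static struct rbdr_like q;

        fill(&q, 8);
        return 0;
}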
* [PATCH 17/46] net/virtio: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (15 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 16/46] net/thunderx: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 18/46] net/hinic: " Tyler Retzlaff
` (34 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/virtio/virtio_ring.h | 4 +--
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 ++++-----
drivers/net/virtio/virtqueue.h | 32 ++++++++++++------------
3 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b..2a25751 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -59,7 +59,7 @@ struct vring_used_elem {
struct vring_used {
uint16_t flags;
- uint16_t idx;
+ RTE_ATOMIC(uint16_t) idx;
struct vring_used_elem ring[];
};
@@ -70,7 +70,7 @@ struct vring_packed_desc {
uint64_t addr;
uint32_t len;
uint16_t id;
- uint16_t flags;
+ RTE_ATOMIC(uint16_t) flags;
};
#define RING_EVENT_FLAGS_ENABLE 0x0
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4fdfe70..24e2b2c 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -948,7 +948,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+ uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -1037,8 +1037,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vring->desc[vq->used_idx].flags, flags,
+ rte_memory_order_release);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {
@@ -1057,9 +1057,9 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
struct vring *vring = &dev->vrings.split[queue_idx];
/* Consume avail ring, using used ring idx as first one */
- while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ while (rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
!= vring->avail->idx) {
- avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ avail_idx = rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
& (vring->num - 1);
desc_idx = vring->avail->ring[avail_idx];
@@ -1070,7 +1070,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
uep->id = desc_idx;
uep->len = n_descs;
- __atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&vring->used->idx, 1, rte_memory_order_relaxed);
}
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 5d0c039..b7bbdde 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -37,7 +37,7 @@
virtio_mb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
else
rte_mb();
}
@@ -46,7 +46,7 @@
virtio_rmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
else
rte_io_rmb();
}
@@ -55,7 +55,7 @@
virtio_wmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
else
rte_io_wmb();
}
@@ -67,12 +67,12 @@
uint16_t flags;
if (weak_barriers) {
-/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports
+/* x86 prefers to using rte_io_rmb over rte_atomic_load_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+ flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
#else
flags = dp->flags;
rte_io_rmb();
@@ -90,12 +90,12 @@
uint16_t flags, uint8_t weak_barriers)
{
if (weak_barriers) {
-/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports
+/* x86 prefers to using rte_io_wmb over rte_atomic_store_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
#else
rte_io_wmb();
dp->flags = flags;
@@ -425,7 +425,7 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
if (vq->hw->weak_barriers) {
/**
- * x86 prefers to using rte_smp_rmb over __atomic_load_n as it
+ * x86 prefers to using rte_smp_rmb over rte_atomic_load_explicit as it
* reports a slightly better perf, which comes from the saved
* branch by the compiler.
* The if and else branches are identical with the smp and io
@@ -435,8 +435,8 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
idx = vq->vq_split.ring.used->idx;
rte_smp_rmb();
#else
- idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
- __ATOMIC_ACQUIRE);
+ idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,
+ rte_memory_order_acquire);
#endif
} else {
idx = vq->vq_split.ring.used->idx;
@@ -454,7 +454,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
vq_update_avail_idx(struct virtqueue *vq)
{
if (vq->hw->weak_barriers) {
- /* x86 prefers to using rte_smp_wmb over __atomic_store_n as
+ /* x86 prefers to using rte_smp_wmb over rte_atomic_store_explicit as
* it reports a slightly better perf, which comes from the
* saved branch by the compiler.
* The if and else branches are identical with the smp and
@@ -464,8 +464,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
rte_smp_wmb();
vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
#else
- __atomic_store_n(&vq->vq_split.ring.avail->idx,
- vq->vq_avail_idx, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,
+ vq->vq_avail_idx, rte_memory_order_release);
#endif
} else {
rte_io_wmb();
@@ -528,8 +528,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
- used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
- __ATOMIC_RELAXED); \
+ used_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \
+ rte_memory_order_relaxed); \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
if (virtio_with_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
@@ -546,7 +546,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
" avail.flags=0x%x; used.flags=0x%x", \
(vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
(vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
- __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+ rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \
(vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
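Most of the virtio changes concern fences rather than atomic objects: with weak barriers the ring synchronisation uses rte_atomic_thread_fence(), otherwise it falls back to the I/O barriers. The selection logic, reduced to a sketch that mirrors the virtio_rmb()/virtio_wmb() hunks above:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

/* Read barrier before consuming descriptors written by the device. */
static inline void
ring_rmb(uint8_t weak_barriers)
{
        if (weak_barriers)
                rte_atomic_thread_fence(rte_memory_order_acquire);
        else
                rte_io_rmb();
}

/* Write barrier before handing descriptors to the device. */
static inline void
ring_wmb(uint8_t weak_barriers)
{
        if (weak_barriers)
                rte_atomic_thread_fence(rte_memory_order_release);
        else
                rte_io_wmb();
}

int
main(void)
{
        ring_wmb(1);
        ring_rmb(1);
        return 0;
}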
* [PATCH 18/46] net/hinic: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (16 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 17/46] net/virtio: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 19/46] net/idpf: " Tyler Retzlaff
` (33 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 7adb6e3..c2cd295 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -1004,7 +1004,7 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
while (pkts < nb_pkts) {
/* 2. current ci is done */
rx_cqe = &rxq->rx_cqe[sw_ci];
- status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
+ status = rte_atomic_load_explicit(&rx_cqe->status, rte_memory_order_acquire);
if (!HINIC_GET_RX_DONE_BE(status))
break;
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 5c30339..d77ef51 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -29,7 +29,7 @@ struct hinic_rq_ctrl {
};
struct hinic_rq_cqe {
- u32 status;
+ RTE_ATOMIC(u32) status;
u32 vlan_len;
u32 offload_type;
u32 rss_hash;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
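The hinic change is the usual way to poll a completion written by the device: load the status word with acquire, and only read the rest of the CQE once the done bit is observed, so the hardware's earlier writes are guaranteed visible. Schematically, with field names and the done bit invented for illustration:

#include <stdint.h>
#include <rte_stdatomic.h>

struct cqe {
        RTE_ATOMIC(uint32_t) status;  /* written last by the device */
        uint32_t len;                 /* valid only once status says "done" */
};

#define CQE_DONE 0x1u

/* Returns the length if the completion is ready, -1 otherwise. */
static int
poll_cqe(const struct cqe *c)
{
        uint32_t status = rte_atomic_load_explicit(&c->status,
                        rte_memory_order_acquire);

        if (!(status & CQE_DONE))
                return -1;
        return (int)c->len;   /* ordered after the acquire load above */
}

int
main(void)
{
        static struct cqe c;

        return (poll_cqe(&c) == -1) ? 0 : 1;
}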
* [PATCH 19/46] net/idpf: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (17 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 18/46] net/hinic: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 20/46] net/qede: " Tyler Retzlaff
` (32 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/idpf/idpf_ethdev.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 86151c9..1df4d6b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -259,8 +259,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed += rte_atomic_load_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -308,7 +308,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&rxq->rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
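The idpf hunks show the typical handling of soft statistics: per-queue counters are bumped, summed and cleared with relaxed ordering, since no other data is published through them. A trimmed-down sketch with an invented stats layout:

#include <stdint.h>
#include <rte_stdatomic.h>

#define NB_QUEUES 4

struct rxq_stats {
        RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};

static struct rxq_stats stats[NB_QUEUES];

static uint64_t
stats_sum(void)
{
        uint64_t total = 0;

        for (unsigned int i = 0; i < NB_QUEUES; i++)
                total += rte_atomic_load_explicit(&stats[i].mbuf_alloc_failed,
                                rte_memory_order_relaxed);
        return total;
}

static void
stats_reset(void)
{
        for (unsigned int i = 0; i < NB_QUEUES; i++)
                rte_atomic_store_explicit(&stats[i].mbuf_alloc_failed, 0,
                                rte_memory_order_relaxed);
}

int
main(void)
{
        rte_atomic_fetch_add_explicit(&stats[0].mbuf_alloc_failed, 1,
                        rte_memory_order_relaxed);
        uint64_t before = stats_sum();

        stats_reset();
        return (before == 1 && stats_sum() == 0) ? 0 : 1;
}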
* [PATCH 20/46] net/qede: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (18 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 19/46] net/idpf: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 21/46] net/ring: " Tyler Retzlaff
` (31 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/qede/base/bcm_osal.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2edeb38..abd1186 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -51,11 +51,11 @@ void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
/* Counter to track current memzone allocated */
static uint16_t ecore_mz_count;
-static uint32_t ref_cnt;
+static RTE_ATOMIC(uint32_t) ref_cnt;
int ecore_mz_mapping_alloc(void)
{
- if (__atomic_fetch_add(&ref_cnt, 1, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_fetch_add_explicit(&ref_cnt, 1, rte_memory_order_relaxed) == 0) {
ecore_mz_mapping = rte_calloc("ecore_mz_map",
rte_memzone_max_get(), sizeof(struct rte_memzone *), 0);
}
@@ -68,7 +68,7 @@ int ecore_mz_mapping_alloc(void)
void ecore_mz_mapping_free(void)
{
- if (__atomic_fetch_sub(&ref_cnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ref_cnt, 1, rte_memory_order_relaxed) - 1 == 0) {
rte_free(ecore_mz_mapping);
ecore_mz_mapping = NULL;
}
--
1.8.3.1
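
One thing worth noting when reading the hunks above: rte_atomic_fetch_add_explicit() and rte_atomic_fetch_sub_explicit() return the value held before the operation, exactly like the __atomic builtins they replace, which is why the "== 0" and "- 1 == 0" tests carry over unchanged. A reduced sketch of the first-user-allocates / last-user-frees idiom, with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) ref_cnt;

/* Returns true for the caller that saw the count go 0 -> 1 and therefore
 * has to perform the one-time allocation. */
static bool
resource_get(void)
{
        return rte_atomic_fetch_add_explicit(&ref_cnt, 1,
                        rte_memory_order_relaxed) == 0;
}

/* Returns true for the caller that dropped the count back to 0 and
 * therefore has to perform the one-time release. */
static bool
resource_put(void)
{
        return rte_atomic_fetch_sub_explicit(&ref_cnt, 1,
                        rte_memory_order_relaxed) - 1 == 0;
}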
* [PATCH 21/46] net/ring: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (19 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 20/46] net/qede: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 22/46] vdpa/mlx5: " Tyler Retzlaff
` (30 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/ring/rte_eth_ring.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 48953dd..b16f5d5 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -44,8 +44,8 @@ enum dev_action {
struct ring_queue {
struct rte_ring *rng;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_internals {
@@ -82,7 +82,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts += nb_rx;
else
- __atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->rx_pkts, nb_rx, rte_memory_order_relaxed);
return nb_rx;
}
@@ -96,7 +96,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SP_ENQ)
r->tx_pkts += nb_tx;
else
- __atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->tx_pkts, nb_tx, rte_memory_order_relaxed);
return nb_tx;
}
--
1.8.3.1
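
The ring PMD keeps the plain "+=" on the single-producer / single-consumer paths and only pays for an atomic read-modify-write when several threads may update the counter. A condensed sketch of that split with an invented wrapper; the boolean stands in for the RING_F_SC_DEQ / RING_F_SP_ENQ checks above:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct queue_counters {
        RTE_ATOMIC(uint64_t) pkts;
};

/* 'exclusive' means only one thread can be in this datapath, so a plain
 * add is enough; otherwise use a relaxed atomic add on the same field. */
static inline void
count_pkts(struct queue_counters *c, uint64_t n, bool exclusive)
{
        if (exclusive)
                c->pkts += n;
        else
                rte_atomic_fetch_add_explicit(&c->pkts, n,
                                rte_memory_order_relaxed);
}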
* [PATCH 22/46] vdpa/mlx5: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (20 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 21/46] net/ring: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 23/46] raw/ifpga: " Tyler Retzlaff
` (29 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +++++++++---------
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +++++------
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 ++-
6 files changed, 52 insertions(+), 44 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@
uint32_t timeout = 0;
/* Check and wait all close tasks done. */
- while (__atomic_load_n(&priv->dev_close_progress,
- __ATOMIC_RELAXED) != 0 && timeout < 1000) {
+ while (rte_atomic_load_explicit(&priv->dev_close_progress,
+ rte_memory_order_relaxed) != 0 && timeout < 1000) {
rte_delay_us_sleep(10000);
timeout++;
}
@@ -294,8 +294,8 @@
priv->last_c_thrd_idx = 0;
else
priv->last_c_thrd_idx++;
- __atomic_store_n(&priv->dev_close_progress,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress,
+ 1, rte_memory_order_relaxed);
if (mlx5_vdpa_task_add(priv,
priv->last_c_thrd_idx,
MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+ rte_memory_order_relaxed);
priv->state = MLX5_VDPA_STATE_PROBED;
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
@@ -664,7 +664,9 @@
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t max_queues, index, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
@@ -847,8 +849,8 @@
if (conf_thread_mng.initializer_priv == priv)
if (mlx5_vdpa_mult_threads_create())
goto error;
- __atomic_fetch_add(&conf_thread_mng.refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+ rte_memory_order_relaxed);
}
if (mlx5_vdpa_create_dev_resources(priv))
goto error;
@@ -937,8 +939,8 @@
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
if (priv->use_c_thread)
- if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
- 1, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+ 1, rte_memory_order_relaxed) == 1)
mlx5_vdpa_mult_threads_destroy(true);
rte_free(priv);
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 7b37c98..0cc67ed 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
struct mlx5_vdpa_task {
struct mlx5_vdpa_priv *priv;
enum mlx5_vdpa_task_type type;
- uint32_t *remaining_cnt;
- uint32_t *err_cnt;
+ RTE_ATOMIC(uint32_t) *remaining_cnt;
+ RTE_ATOMIC(uint32_t) *err_cnt;
uint32_t idx;
} __rte_packed __rte_aligned(4);
@@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
struct mlx5_vdpa_conf_thread_mng {
void *initializer_priv;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
uint32_t max_thrds;
pthread_mutex_t cthrd_lock;
struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
@@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
uint16_t last_c_thrd_idx;
- uint16_t dev_close_progress;
+ RTE_ATOMIC(uint16_t) dev_close_progress;
uint16_t num_mrs; /* Number of memory regions. */
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
@@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num);
int
mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
void
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num)
{
struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@
return -1;
for (i = 0 ; i < num; i++)
if (task[i].remaining_cnt)
- __atomic_fetch_add(task[i].remaining_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+ rte_memory_order_relaxed);
/* wake up conf thread. */
pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@
}
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
{
/* Check and wait all tasks done. */
- while (__atomic_load_n(remaining_cnt,
- __ATOMIC_RELAXED) != 0) {
+ while (rte_atomic_load_explicit(remaining_cnt,
+ rte_memory_order_relaxed) != 0) {
rte_delay_us_sleep(sleep_time);
}
- if (__atomic_load_n(err_cnt,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(err_cnt,
+ rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Tasks done with error.");
return true;
}
@@ -137,8 +137,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to register mr %d.", task.idx);
- __atomic_fetch_add(task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+ rte_memory_order_relaxed);
}
break;
case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to setup virtq %d.", task.idx);
- __atomic_fetch_add(
- task.err_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ task.err_cnt, 1, rte_memory_order_relaxed);
}
virtq->enable = 1;
pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@
DRV_LOG(ERR,
"Failed to stop virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -176,9 +176,9 @@
DRV_LOG(ERR,
"Failed to get negotiated features virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -200,9 +200,9 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(
+ rte_atomic_store_explicit(
&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
break;
case MLX5_VDPA_TASK_PREPARE_VIRTQ:
ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@
DRV_LOG(ERR,
"Failed to prepare virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
break;
default:
@@ -222,8 +222,8 @@
break;
}
if (task.remaining_cnt)
- __atomic_fetch_sub(task.remaining_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+ 1, rte_memory_order_relaxed);
}
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@
uint8_t mode = 0;
int ret = -rte_errno;
uint32_t i, thrd_idx, data[1];
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
(priv->vid, &mode, &priv->vmem_info.size,
&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@
{
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
struct rte_vhost_vring vq;
--
1.8.3.1
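
Most of the vdpa changes thread the RTE_ATOMIC() qualifier through the task bookkeeping: a submitter bumps *remaining_cnt per queued task, the worker decrements it and bumps *err_cnt on failure, and a waiter polls until the remaining count drops to zero. A compressed sketch of that protocol under assumed function names; the real driver sleeps between polls with rte_delay_us_sleep() instead of spinning.

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_pause.h>

/* Submitter: account one task before handing it to the worker thread. */
static inline void
task_submitted(RTE_ATOMIC(uint32_t) *remaining_cnt)
{
        rte_atomic_fetch_add_explicit(remaining_cnt, 1,
                        rte_memory_order_relaxed);
}

/* Worker: record completion of one task, and any failure. */
static inline void
task_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
          RTE_ATOMIC(uint32_t) *err_cnt, bool failed)
{
        if (failed)
                rte_atomic_fetch_add_explicit(err_cnt, 1,
                                rte_memory_order_relaxed);
        rte_atomic_fetch_sub_explicit(remaining_cnt, 1,
                        rte_memory_order_relaxed);
}

/* Waiter: block until all tasks are done, report whether any failed. */
static inline bool
tasks_wait(RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt)
{
        while (rte_atomic_load_explicit(remaining_cnt,
                        rte_memory_order_relaxed) != 0)
                rte_pause();
        return rte_atomic_load_explicit(err_cnt,
                        rte_memory_order_relaxed) != 0;
}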
* [PATCH 23/46] raw/ifpga: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (21 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 22/46] vdpa/mlx5: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 24/46] event/opdl: " Tyler Retzlaff
` (28 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/raw/ifpga/ifpga_rawdev.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c
index f89bd3f..78d3c88 100644
--- a/drivers/raw/ifpga/ifpga_rawdev.c
+++ b/drivers/raw/ifpga/ifpga_rawdev.c
@@ -73,7 +73,7 @@
static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
-static int ifpga_monitor_refcnt;
+static RTE_ATOMIC(int) ifpga_monitor_refcnt;
static rte_thread_t ifpga_monitor_start_thread;
static struct ifpga_rawdev *
@@ -512,7 +512,7 @@ static int set_surprise_link_check_aer(
int gsd_enable, ret;
#define MS 1000
- while (__atomic_load_n(&ifpga_monitor_refcnt, __ATOMIC_RELAXED)) {
+ while (rte_atomic_load_explicit(&ifpga_monitor_refcnt, rte_memory_order_relaxed)) {
gsd_enable = 0;
for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
ifpga_rdev = &ifpga_rawdevices[i];
@@ -549,7 +549,7 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 1;
- if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_fetch_add_explicit(&ifpga_monitor_refcnt, 1, rte_memory_order_relaxed)) {
ret = rte_thread_create_internal_control(&ifpga_monitor_start_thread,
"ifpga-mon", ifpga_rawdev_gsd_handle, NULL);
if (ret != 0) {
@@ -573,7 +573,8 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 0;
- if (!(__atomic_fetch_sub(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED) - 1) &&
+ if (!(rte_atomic_fetch_sub_explicit(&ifpga_monitor_refcnt, 1,
+ rte_memory_order_relaxed) - 1) &&
ifpga_monitor_start_thread.opaque_id != 0) {
ret = pthread_cancel((pthread_t)ifpga_monitor_start_thread.opaque_id);
if (ret)
--
1.8.3.1
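
In the ifpga driver the refcount doubles as the run condition for the monitor thread: the first registration starts it, the last deregistration lets the polling loop exit. A sketch of the thread body only, under invented names; the start and stop sides are the same previous-value tests as in the qede patch earlier in the series.

#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(int) monitor_refcnt;

/* Hypothetical monitor thread body: keep polling while at least one
 * device is registered, observing the counter with a relaxed load just
 * like the loop condition in the patch. */
static uint32_t
monitor_loop(void *arg)
{
        (void)arg;
        while (rte_atomic_load_explicit(&monitor_refcnt,
                        rte_memory_order_relaxed) != 0) {
                /* ... poll the registered devices ... */
        }
        return 0;
}

In the driver this body runs under rte_thread_create_internal_control(); the sketch is only meant to show the relaxed load in the loop condition.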
* [PATCH 24/46] event/opdl: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (22 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 23/46] raw/ifpga: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 25/46] event/octeontx: " Tyler Retzlaff
` (27 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/opdl/opdl_ring.c | 80 +++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index da5ea02..a86bfb8 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -47,12 +47,12 @@ struct shared_state {
/* Last known minimum sequence number of dependencies, used for multi
* thread operation
*/
- uint32_t available_seq;
+ RTE_ATOMIC(uint32_t) available_seq;
char _pad1[RTE_CACHE_LINE_SIZE * 3];
- uint32_t head; /* Head sequence number (for multi thread operation) */
+ RTE_ATOMIC(uint32_t) head; /* Head sequence number (for multi thread operation) */
char _pad2[RTE_CACHE_LINE_SIZE * 3];
struct opdl_stage *stage; /* back pointer */
- uint32_t tail; /* Tail sequence number */
+ RTE_ATOMIC(uint32_t) tail; /* Tail sequence number */
char _pad3[RTE_CACHE_LINE_SIZE * 2];
} __rte_cache_aligned;
@@ -150,10 +150,10 @@ struct opdl_ring {
available(const struct opdl_stage *s)
{
if (s->threadsafe == true) {
- uint32_t n = __atomic_load_n(&s->shared.available_seq,
- __ATOMIC_ACQUIRE) -
- __atomic_load_n(&s->shared.head,
- __ATOMIC_ACQUIRE);
+ uint32_t n = rte_atomic_load_explicit(&s->shared.available_seq,
+ rte_memory_order_acquire) -
+ rte_atomic_load_explicit(&s->shared.head,
+ rte_memory_order_acquire);
/* Return 0 if available_seq needs to be updated */
return (n <= s->num_slots) ? n : 0;
@@ -169,7 +169,7 @@ struct opdl_ring {
{
uint32_t i;
uint32_t this_tail = s->shared.tail;
- uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+ uint32_t min_seq = rte_atomic_load_explicit(&s->deps[0]->tail, rte_memory_order_acquire);
/* Input stage sequence numbers are greater than the sequence numbers of
* its dependencies so an offset of t->num_slots is needed when
* calculating available slots and also the condition which is used to
@@ -180,16 +180,16 @@ struct opdl_ring {
if (is_input_stage(s)) {
wrap = s->num_slots;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((this_tail - seq) > (this_tail - min_seq))
min_seq = seq;
}
} else {
wrap = 0;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((seq - this_tail) < (min_seq - this_tail))
min_seq = seq;
}
@@ -198,8 +198,8 @@ struct opdl_ring {
if (s->threadsafe == false)
s->available_seq = min_seq + wrap;
else
- __atomic_store_n(&s->shared.available_seq, min_seq + wrap,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.available_seq, min_seq + wrap,
+ rte_memory_order_release);
}
/* Wait until the number of available slots reaches number requested */
@@ -299,7 +299,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -382,18 +382,18 @@ struct opdl_ring {
/* There should be no race condition here. If shared.tail
* matches, no other core can update it until this one does.
*/
- if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&s->shared.tail, rte_memory_order_acquire) ==
tail) {
if (num_entries >= (head - tail)) {
claim_mgr_remove(disclaims);
- __atomic_store_n(&s->shared.tail, head,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, head,
+ rte_memory_order_release);
num_entries -= (head - tail);
} else {
claim_mgr_move_tail(disclaims, num_entries);
- __atomic_store_n(&s->shared.tail,
+ rte_atomic_store_explicit(&s->shared.tail,
num_entries + tail,
- __ATOMIC_RELEASE);
+ rte_memory_order_release);
num_entries = 0;
}
} else if (block == false)
@@ -421,7 +421,7 @@ struct opdl_ring {
opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
false);
- *old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+ *old_head = rte_atomic_load_explicit(&s->shared.head, rte_memory_order_acquire);
while (true) {
bool success;
/* If called by opdl_ring_input(), claim does not need to be
@@ -441,11 +441,10 @@ struct opdl_ring {
if (*num_entries == 0)
return;
- success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+ success = rte_atomic_compare_exchange_weak_explicit(&s->shared.head, old_head,
*old_head + *num_entries,
- true, /* may fail spuriously */
- __ATOMIC_RELEASE, /* memory order on success */
- __ATOMIC_ACQUIRE); /* memory order on fail */
+ rte_memory_order_release, /* memory order on success */
+ rte_memory_order_acquire); /* memory order on fail */
if (likely(success))
break;
rte_pause();
@@ -473,10 +472,11 @@ struct opdl_ring {
/* If another thread started inputting before this one, but hasn't
* finished, we need to wait for it to complete to update the tail.
*/
- rte_wait_until_equal_32(&s->shared.tail, old_head, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&s->shared.tail, old_head,
+ rte_memory_order_acquire);
- __atomic_store_n(&s->shared.tail, old_head + num_entries,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, old_head + num_entries,
+ rte_memory_order_release);
return num_entries;
}
@@ -526,8 +526,8 @@ struct opdl_ring {
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- event = __atomic_load_n(&(ev->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -628,8 +628,8 @@ struct opdl_ring {
num_entries, s->head - old_tail);
num_entries = s->head - old_tail;
}
- __atomic_store_n(&s->shared.tail, num_entries + old_tail,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, num_entries + old_tail,
+ rte_memory_order_release);
}
uint32_t
@@ -658,7 +658,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
@@ -677,7 +677,7 @@ struct opdl_ring {
copy_entries_out(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -756,7 +756,7 @@ struct opdl_ring {
return 0;
}
if (s->threadsafe == false) {
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
s->seq += s->num_claimed;
s->shadow_head = s->head;
s->num_claimed = 0;
@@ -1009,8 +1009,8 @@ struct opdl_ring *
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- event = __atomic_load_n(&(ev_orig->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev_orig->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -1027,9 +1027,9 @@ struct opdl_ring *
if ((event & OPDL_EVENT_MASK) !=
ev_temp) {
- __atomic_store_n(&(ev_orig->event),
- ev_update,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(
+ (uint64_t __rte_atomic *)&ev_orig->event,
+ ev_update, rte_memory_order_release);
ev_updated = true;
}
if (ev_orig->u64 != ev->u64) {
--
1.8.3.1
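
The only non-mechanical spot above is the head-claim loop: __atomic_compare_exchange_n(..., weak, ...) becomes rte_atomic_compare_exchange_weak_explicit(), keeping the update-expected-on-failure behaviour and the separate success/failure orderings. A stripped-down sketch of that idiom outside the opdl structures, with an invented function name and a standalone head variable:

#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_pause.h>

static RTE_ATOMIC(uint32_t) head;

/* Advance 'head' by 'n' and return the value it had before the claim.
 * The weak CAS may fail spuriously, so it sits in a retry loop; on
 * failure 'old' is refreshed with the current head (acquire), on
 * success the new head is published (release). */
static uint32_t
claim_slots(uint32_t n)
{
        uint32_t old = rte_atomic_load_explicit(&head,
                        rte_memory_order_acquire);

        while (!rte_atomic_compare_exchange_weak_explicit(&head, &old,
                        old + n,
                        rte_memory_order_release,   /* on success */
                        rte_memory_order_acquire))  /* on failure */
                rte_pause();
        return old;
}

The cast added around rte_wait_until_equal_32() above is the other side of the same change: that helper takes a plain uint32_t pointer rather than an RTE_ATOMIC()-qualified one.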
* [PATCH 25/46] event/octeontx: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (23 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 24/46] event/opdl: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 26/46] event/dsw: " Tyler Retzlaff
` (26 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/octeontx/timvf_evdev.h | 8 ++++----
drivers/event/octeontx/timvf_worker.h | 36 +++++++++++++++++------------------
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index cef02cd..4bfc3d7 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -126,15 +126,15 @@ enum timvf_clk_src {
struct tim_mem_bucket {
uint64_t first_chunk;
union {
- uint64_t w1;
+ RTE_ATOMIC(uint64_t) w1;
struct {
- uint32_t nb_entry;
+ RTE_ATOMIC(uint32_t) nb_entry;
uint8_t sbt:1;
uint8_t hbt:1;
uint8_t bsk:1;
uint8_t rsvd:5;
- uint8_t lock;
- int16_t chunk_remainder;
+ RTE_ATOMIC(uint8_t) lock;
+ RTE_ATOMIC(int16_t) chunk_remainder;
};
};
uint64_t current_chunk;
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index e4b923e..de9f1b0 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -19,22 +19,22 @@
static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
- return __atomic_load_n(&bktp->chunk_remainder,
- __ATOMIC_ACQUIRE);
+ return rte_atomic_load_explicit(&bktp->chunk_remainder,
+ rte_memory_order_acquire);
}
static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_store_n(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_fetch_sub(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline uint8_t
@@ -47,14 +47,14 @@
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
- return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_or_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint8_t
@@ -81,34 +81,34 @@
{
/*Clear everything except lock. */
const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
- __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
- __ATOMIC_RELAXED);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
+ rte_memory_order_relaxed);
}
static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit(&bktp->lock, 0xff, rte_memory_order_acq_rel);
}
static inline uint32_t
@@ -121,13 +121,13 @@
static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
}
static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
- __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
}
static inline uint64_t
@@ -135,7 +135,7 @@
{
const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
TIM_BUCKET_W1_S_NUM_ENTRIES);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v;
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
}
static inline struct tim_mem_entry *
--
1.8.3.1
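
The timvf bucket packs several fields into one 64-bit word and manipulates them with fetch-or / fetch-and, so the conversion is a rename plus rte_memory_order_acq_rel. A minimal sketch of setting and clearing one flag bit in such a shared word; the bit position is made up, the real TIM_BUCKET_* shifts stay in the driver.

#include <stdint.h>
#include <rte_stdatomic.h>

#define FLAG_SBT (UINT64_C(1) << 32)    /* illustrative bit position */

static RTE_ATOMIC(uint64_t) bucket_w1;

/* Set the flag, returning the whole word as it was before the update. */
static inline uint64_t
bucket_set_sbt(void)
{
        return rte_atomic_fetch_or_explicit(&bucket_w1, FLAG_SBT,
                        rte_memory_order_acq_rel);
}

/* Clear the flag, again returning the previous value of the word. */
static inline uint64_t
bucket_clr_sbt(void)
{
        return rte_atomic_fetch_and_explicit(&bucket_w1, ~FLAG_SBT,
                        rte_memory_order_acq_rel);
}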
* [PATCH 26/46] event/dsw: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (24 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 25/46] event/octeontx: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 27/46] dma/skeleton: " Tyler Retzlaff
` (25 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/dsw/dsw_evdev.h | 6 +++---
drivers/event/dsw/dsw_event.c | 34 +++++++++++++++++-----------------
drivers/event/dsw/dsw_xstats.c | 4 ++--
3 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index d745c89..20431d2 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -227,9 +227,9 @@ struct dsw_port {
struct rte_ring *ctl_in_ring __rte_cache_aligned;
/* Estimate of current port load. */
- int16_t load __rte_cache_aligned;
+ RTE_ATOMIC(int16_t) load __rte_cache_aligned;
/* Estimate of flows currently migrating to this port. */
- int32_t immigration_load __rte_cache_aligned;
+ RTE_ATOMIC(int32_t) immigration_load __rte_cache_aligned;
} __rte_cache_aligned;
struct dsw_queue {
@@ -252,7 +252,7 @@ struct dsw_evdev {
uint8_t num_queues;
int32_t max_inflight;
- int32_t credits_on_loan __rte_cache_aligned;
+ RTE_ATOMIC(int32_t) credits_on_loan __rte_cache_aligned;
};
#define DSW_CTL_PAUS_REQ (0)
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 23488d9..6c17b44 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -33,7 +33,7 @@
}
total_on_loan =
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
available = dsw->max_inflight - total_on_loan;
acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
@@ -45,13 +45,13 @@
* allocation.
*/
new_total_on_loan =
- __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED) + acquired_credits;
+ rte_atomic_fetch_add_explicit(&dsw->credits_on_loan, acquired_credits,
+ rte_memory_order_relaxed) + acquired_credits;
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
- __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, acquired_credits,
+ rte_memory_order_relaxed);
return false;
}
@@ -77,8 +77,8 @@
port->inflight_credits = leave_credits;
- __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, return_credits,
+ rte_memory_order_relaxed);
DSW_LOG_DP_PORT(DEBUG, port->id,
"Returned %d tokens to pool.\n",
@@ -156,19 +156,19 @@
int16_t period_load;
int16_t new_load;
- old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
+ old_load = rte_atomic_load_explicit(&port->load, rte_memory_order_relaxed);
period_load = dsw_port_load_close_period(port, now);
new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
(DSW_OLD_LOAD_WEIGHT+1);
- __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->load, new_load, rte_memory_order_relaxed);
/* The load of the recently immigrated flows should hopefully
* be reflected the load estimate by now.
*/
- __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->immigration_load, 0, rte_memory_order_relaxed);
}
static void
@@ -390,10 +390,10 @@ struct dsw_queue_flow_burst {
for (i = 0; i < dsw->num_ports; i++) {
int16_t measured_load =
- __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].load, rte_memory_order_relaxed);
int32_t immigration_load =
- __atomic_load_n(&dsw->ports[i].immigration_load,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
+ rte_memory_order_relaxed);
int32_t load = measured_load + immigration_load;
load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -523,8 +523,8 @@ struct dsw_queue_flow_burst {
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
- __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
- candidate_flow_load, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&dsw->ports[candidate_port_id].immigration_load,
+ candidate_flow_load, rte_memory_order_relaxed);
return true;
}
@@ -882,7 +882,7 @@ struct dsw_queue_flow_burst {
}
source_port_load =
- __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&source_port->load, rte_memory_order_relaxed);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Load %d is below threshold level %d.\n",
@@ -1301,7 +1301,7 @@ struct dsw_queue_flow_burst {
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
+ rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed) >
source_port->new_event_threshold))
return 0;
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index 2a83a28..f61dfd8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -48,7 +48,7 @@ struct dsw_xstats_port {
static uint64_t
dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
{
- return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
}
static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ struct dsw_xstats_port {
{
int16_t load;
- load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
+ load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
return DSW_LOAD_TO_PERCENT(load);
}
--
1.8.3.1
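
The dsw credit pool is a good example of leaning on the returned pre-operation value: a port optimistically adds the credits it wants, derives the new total from the return value, and hands the credits back if a racing port made it overshoot the limit. A reduced sketch under assumed names:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(int32_t) credits_on_loan;

/* Try to take 'want' credits out of a global budget of 'max_inflight'.
 * fetch_add returns the pre-addition value, so the new total is that
 * value plus 'want'; on overshoot the credits are returned. */
static bool
credits_take(int32_t want, int32_t max_inflight)
{
        int32_t new_total =
                rte_atomic_fetch_add_explicit(&credits_on_loan, want,
                                rte_memory_order_relaxed) + want;

        if (new_total > max_inflight) {
                rte_atomic_fetch_sub_explicit(&credits_on_loan, want,
                                rte_memory_order_relaxed);
                return false;
        }
        return true;
}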
* [PATCH 27/46] dma/skeleton: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (25 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 26/46] event/dsw: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 28/46] crypto/octeontx: " Tyler Retzlaff
` (24 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/dma/skeleton/skeleton_dmadev.c | 5 +++--
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index 48f88f9..926c188 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -142,7 +142,7 @@
else if (desc->op == SKELDMA_OP_FILL)
do_fill(desc);
- __atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&hw->completed_count, 1, rte_memory_order_release);
(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
}
@@ -335,7 +335,8 @@
RTE_SET_USED(vchan);
*status = RTE_DMA_VCHAN_IDLE;
- if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE)
+ if (hw->submitted_count != rte_atomic_load_explicit(&hw->completed_count,
+ rte_memory_order_acquire)
|| hw->zero_req_count == 0)
*status = RTE_DMA_VCHAN_ACTIVE;
return 0;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index c9bf315..3730cbc 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -81,7 +81,7 @@ struct skeldma_hw {
/* Cache delimiter for cpuwork thread's operation data */
char cache2 __rte_cache_aligned;
volatile uint32_t zero_req_count;
- uint64_t completed_count;
+ RTE_ATOMIC(uint64_t) completed_count;
};
#endif /* SKELETON_DMADEV_H */
--
1.8.3.1
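
Unlike the purely statistical counters earlier in the series, this one is ordered: the worker publishes completed_count with release ordering after finishing a descriptor, and the vchan-status query reads it with acquire, so a matching count also guarantees the completed work is visible. A minimal sketch of that pairing, with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static uint64_t submitted_count;             /* touched by one thread only */
static RTE_ATOMIC(uint64_t) completed_count; /* published by the worker */

/* Worker thread: finish the descriptor, then publish its completion. */
static void
worker_complete_one(void)
{
        /* ... perform the copy/fill for one descriptor ... */
        rte_atomic_fetch_add_explicit(&completed_count, 1,
                        rte_memory_order_release);
}

/* Control path: the acquire load pairs with the release above, so when
 * the two counts match, everything done before publishing is visible. */
static bool
vchan_is_idle(void)
{
        return submitted_count ==
               rte_atomic_load_explicit(&completed_count,
                        rte_memory_order_acquire);
}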
* [PATCH 28/46] crypto/octeontx: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (26 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 27/46] dma/skeleton: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 29/46] common/mlx5: " Tyler Retzlaff
` (23 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 947e1be..bafd0c1 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -652,7 +652,7 @@
if (!rsp_info->sched_type)
ssows_head_wait(ws);
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
}
@@ -896,7 +896,7 @@
pcount = pending_queue_level(pqueue, DEFAULT_CMD_QLEN);
/* Ensure pcount isn't read before data lands */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
count = (nb_ops > pcount) ? pcount : nb_ops;
--
1.8.3.1
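
Here only the two standalone fences change spelling: rte_atomic_thread_fence() now takes an rte_memory_order_* constant instead of an __ATOMIC_* one. A tiny sketch of the publish/consume pairing such fences express, with an invented flag and payload (in the driver the actual publication is the ssovf_store_pair() of the work entry):

#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

static uint64_t payload;
static RTE_ATOMIC(uint32_t) ready;

/* Producer: the release fence orders the payload write before the store
 * that makes it observable. */
static void
publish(uint64_t v)
{
        payload = v;
        rte_atomic_thread_fence(rte_memory_order_release);
        rte_atomic_store_explicit(&ready, 1, rte_memory_order_relaxed);
}

/* Consumer: the acquire fence keeps the payload read from being hoisted
 * above the check of the flag. */
static bool
consume(uint64_t *v)
{
        if (rte_atomic_load_explicit(&ready, rte_memory_order_relaxed) == 0)
                return false;
        rte_atomic_thread_fence(rte_memory_order_acquire);
        *v = payload;
        return true;
}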
* [PATCH 29/46] common/mlx5: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (27 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 28/46] crypto/octeontx: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 30/46] common/idpf: " Tyler Retzlaff
` (22 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/common/mlx5/linux/mlx5_nl.c | 5 +--
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 ++++-----
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 +++++++++---------
drivers/common/mlx5/mlx5_common_utils.h | 6 ++--
drivers/common/mlx5/mlx5_malloc.c | 58 ++++++++++++++++-----------------
7 files changed, 61 insertions(+), 60 deletions(-)
diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 28a1f56..bf6dd19 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -175,10 +175,11 @@ struct mlx5_nl_port_info {
uint16_t state; /**< IB device port state (out). */
};
-uint32_t atomic_sn;
+RTE_ATOMIC(uint32_t) atomic_sn;
/* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
+#define MLX5_NL_SN_GENERATE (rte_atomic_fetch_add_explicit(&atomic_sn, 1, \
+ rte_memory_order_relaxed) + 1)
/**
* Opens a Netlink socket.
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 9c80277..14c70ed 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -195,7 +195,7 @@ enum mlx5_cqe_status {
/* Prevent speculative reading of other fields in CQE until
* CQE is valid.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
op_code == MLX5_CQE_REQ_ERR))
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 85ec10d..50922ad 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -35,7 +35,7 @@ struct mlx5_range {
/** Memory region for a mempool. */
struct mlx5_mempool_mr {
struct mlx5_pmd_mr pmd_mr;
- uint32_t refcnt; /**< Number of mempools sharing this MR. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
};
/* Mempool registration. */
@@ -56,11 +56,11 @@ struct mlx5_mempool_reg {
{
struct mlx5_mprq_buf *buf = opaque;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
rte_mempool_put(buf->mp, buf);
- } else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0)) {
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ } else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0)) {
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
rte_mempool_put(buf->mp, buf);
}
}
@@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {
unsigned int i;
for (i = 0; i < mpr->mrs_n; i++)
- __atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {
bool ret = false;
for (i = 0; i < mpr->mrs_n; i++)
- ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0;
+ ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0;
return ret;
}
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index 8789d40..5bdf48a 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {
/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
struct rte_mempool *mp;
- uint16_t refcnt; /* Atomically accessed refcnt. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
struct rte_mbuf_ext_shared_info shinfos[];
/*
* Shared information per stride.
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index e69d068..4b95d35 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -81,14 +81,14 @@ struct mlx5_list *
while (entry != NULL) {
if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
if (reuse) {
- ret = __atomic_fetch_add(&entry->ref_cnt, 1,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
+ rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
l_const->name, (void *)entry,
entry->ref_cnt);
} else if (lcore_index < MLX5_LIST_GLOBAL) {
- ret = __atomic_load_n(&entry->ref_cnt,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_load_explicit(&entry->ref_cnt,
+ rte_memory_order_relaxed);
}
if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
return entry;
@@ -151,13 +151,13 @@ struct mlx5_list_entry *
{
struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
- uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
- __ATOMIC_RELAXED);
+ uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
+ rte_memory_order_relaxed);
while (inv_cnt != 0 && entry != NULL) {
struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
- if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
LIST_REMOVE(entry, next);
if (l_const->lcores_share)
l_const->cb_clone_free(l_const->ctx, entry);
@@ -217,7 +217,7 @@ struct mlx5_list_entry *
entry->lcore_idx = (uint32_t)lcore_index;
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
l_const->name, lcore_index,
(void *)entry, entry->ref_cnt);
@@ -254,7 +254,7 @@ struct mlx5_list_entry *
l_inconst->gen_cnt++;
rte_rwlock_write_unlock(&l_inconst->lock);
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
(void *)entry, entry->ref_cnt);
return local_entry;
@@ -285,7 +285,7 @@ struct mlx5_list_entry *
{
struct mlx5_list_entry *gentry = entry->gentry;
- if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
if (entry->lcore_idx == (uint32_t)lcore_idx) {
LIST_REMOVE(entry, next);
@@ -294,23 +294,23 @@ struct mlx5_list_entry *
else
l_const->cb_remove(l_const->ctx, entry);
} else {
- __atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+ 1, rte_memory_order_relaxed);
}
if (!l_const->lcores_share) {
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)entry);
return 0;
}
- if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
rte_rwlock_write_lock(&l_inconst->lock);
if (likely(gentry->ref_cnt == 0)) {
LIST_REMOVE(gentry, next);
rte_rwlock_write_unlock(&l_inconst->lock);
l_const->cb_remove(l_const->ctx, gentry);
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)gentry);
return 0;
@@ -377,7 +377,7 @@ struct mlx5_list_entry *
mlx5_list_get_entry_num(struct mlx5_list *list)
{
MLX5_ASSERT(list);
- return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
}
/********************* Hash List **********************/
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index ae15119..cb4d104 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -29,7 +29,7 @@
*/
struct mlx5_list_entry {
LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
- uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
+ RTE_ATOMIC(uint32_t) ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
uint32_t lcore_idx;
union {
struct mlx5_list_entry *gentry;
@@ -39,7 +39,7 @@ struct mlx5_list_entry {
struct mlx5_list_cache {
LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
- uint32_t inv_cnt; /* Invalid entries counter. */
+ RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
} __rte_cache_aligned;
/**
@@ -111,7 +111,7 @@ struct mlx5_list_const {
struct mlx5_list_inconst {
rte_rwlock_t lock; /* read/write lock. */
volatile uint32_t gen_cnt; /* List modification may update it. */
- volatile uint32_t count; /* number of entries in list. */
+ volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
struct mlx5_list_cache *cache[MLX5_LIST_MAX];
/* Lcore cache, last index is the global cache. */
};
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index c58c41d..ef6dabe 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -16,7 +16,7 @@ struct mlx5_sys_mem {
uint32_t init:1; /* Memory allocator initialized. */
uint32_t enable:1; /* System memory select. */
uint32_t reserve:30; /* Reserve. */
- struct rte_memseg_list *last_msl;
+ RTE_ATOMIC(struct rte_memseg_list *) last_msl;
/* last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
uint64_t malloc_sys;
@@ -93,14 +93,14 @@ struct mlx5_sys_mem {
* different with the cached msl.
*/
if (addr && !mlx5_mem_check_msl(addr,
- (struct rte_memseg_list *)__atomic_load_n
- (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
- __atomic_store_n(&mlx5_sys_mem.last_msl,
+ (struct rte_memseg_list *)rte_atomic_load_explicit
+ (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
+ rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
rte_mem_virt2memseg_list(addr),
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
+ rte_memory_order_relaxed);
#endif
}
}
@@ -122,11 +122,11 @@ struct mlx5_sys_mem {
* to check if the memory belongs to rte memory.
*/
if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
- __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+ rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
if (!rte_mem_virt2memseg_list(addr))
return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
#endif
}
return true;
@@ -185,8 +185,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -199,8 +199,8 @@ struct mlx5_sys_mem {
addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -233,8 +233,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -246,8 +246,8 @@ struct mlx5_sys_mem {
new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -259,14 +259,14 @@ struct mlx5_sys_mem {
return;
if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
+ rte_memory_order_relaxed);
#endif
mlx5_os_free(addr);
} else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
+ rte_memory_order_relaxed);
#endif
rte_free(addr);
}
@@ -280,14 +280,14 @@ struct mlx5_sys_mem {
" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
" update:%"PRIi64"",
- __atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
#endif
}
--
1.8.3.1
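
For readers new to the rte stdatomic API, a minimal sketch of the two idioms the mlx5 list hunks above rely on: declaring the shared field with RTE_ATOMIC(), and testing the value returned by the fetch-and-subtract (which is the value *before* the decrement) when dropping a reference. Names here are illustrative, not the mlx5 structures:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct obj {
    /* Only ever touched through rte_atomic_*() accessors. */
    RTE_ATOMIC(uint32_t) ref_cnt;
};

/* Returns true when the caller dropped the last reference. */
static inline bool
obj_release(struct obj *o)
{
    /* fetch_sub returns the value before the subtraction, hence "- 1". */
    return rte_atomic_fetch_sub_explicit(&o->ref_cnt, 1,
            rte_memory_order_relaxed) - 1 == 0;
}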
* [PATCH 30/46] common/idpf: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (28 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 29/46] common/mlx5: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 31/46] common/iavf: " Tyler Retzlaff
` (21 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/common/idpf/idpf_common_device.h | 6 +++---
drivers/common/idpf/idpf_common_rxtx.c | 14 ++++++++------
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 ++++++++--------
4 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 2b94f03..6a44cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -48,7 +48,7 @@ struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
struct virtchnl2_get_capabilities caps;
- volatile uint32_t pend_cmd; /* pending command not finished */
+ volatile RTE_ATOMIC(uint32_t) pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from cp */
uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
@@ -179,8 +179,8 @@ struct idpf_cmd_info {
atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
{
uint32_t op_unk = VIRTCHNL2_OP_UNKNOWN;
- bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ bool ret = rte_atomic_compare_exchange_strong_explicit(&adapter->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 83b131e..b09c58c 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -592,8 +592,8 @@
next_avail = 0;
rx_bufq->nb_rx_hold -= delta;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
return;
@@ -612,8 +612,8 @@
next_avail += nb_refill;
rx_bufq->nb_rx_hold -= nb_refill;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
}
@@ -1093,7 +1093,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(nmb == NULL)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -1203,7 +1204,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index b49b1ed..eeeeed1 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -97,7 +97,7 @@
#define IDPF_RX_SPLIT_BUFQ2_ID 2
struct idpf_rx_stats {
- uint64_t mbuf_alloc_failed;
+ RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
struct idpf_rx_queue {
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index f65e8d5..3b5e124 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -38,8 +38,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -168,8 +168,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
@@ -564,8 +564,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
@@ -638,8 +638,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
--
1.8.3.1
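
One detail of this patch worth calling out: the old __atomic_compare_exchange() took the desired value by address, while rte_atomic_compare_exchange_strong_explicit() takes it by value, which is why "&ops" becomes "ops" above. A small sketch of the claim-the-pending-slot pattern, with illustrative names rather than the idpf ones:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) pend_cmd; /* illustrative stand-in */

/* Try to claim the single pending-command slot; fails (and leaves the
 * currently pending command in "expected") if another command is still
 * in flight. */
static inline bool
cmd_try_set(uint32_t idle_val, uint32_t ops)
{
    uint32_t expected = idle_val;

    return rte_atomic_compare_exchange_strong_explicit(&pend_cmd,
            &expected, ops,
            rte_memory_order_acquire, rte_memory_order_acquire);
}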
* [PATCH 31/46] common/iavf: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (29 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 30/46] common/idpf: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 32/46] baseband/acc: " Tyler Retzlaff
` (20 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/common/iavf/iavf_impl.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
index 8919b0e..c0ff301 100644
--- a/drivers/common/iavf/iavf_impl.c
+++ b/drivers/common/iavf/iavf_impl.c
@@ -18,7 +18,7 @@ enum iavf_status
u64 size,
u32 alignment)
{
- static uint64_t iavf_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) iavf_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -26,7 +26,7 @@ enum iavf_status
return IAVF_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "iavf_dma_%" PRIu64,
- __atomic_fetch_add(&iavf_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&iavf_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
--
1.8.3.1
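
The whole change here is the unique-name counter: a relaxed fetch-and-add on a static RTE_ATOMIC() variable is enough to hand out distinct memzone suffixes from any thread. A minimal sketch with made-up names:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint64_t) name_seq; /* illustrative counter */

static inline void
next_zone_name(char *buf, size_t len)
{
    /* fetch_add returns the previous value, so every caller gets a
     * different suffix even under concurrent allocation. */
    snprintf(buf, len, "zone_%" PRIu64,
        rte_atomic_fetch_add_explicit(&name_seq, 1,
            rte_memory_order_relaxed));
}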
* [PATCH 32/46] baseband/acc: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (30 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 31/46] common/iavf: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 33/46] net/txgbe: " Tyler Retzlaff
` (19 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 34 deletions(-)
diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
index 4f666e5..ee50b9c 100644
--- a/drivers/baseband/acc/rte_acc100_pmd.c
+++ b/drivers/baseband/acc/rte_acc100_pmd.c
@@ -3673,8 +3673,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3728,8 +3728,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3742,8 +3742,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3755,8 +3755,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n",
desc, rsp.val, descs_in_tb, desc->req.numCBs);
@@ -3793,8 +3793,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3846,8 +3846,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3902,8 +3902,8 @@
uint8_t cbs_in_tb = 1, cb_idx = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3919,8 +3919,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3930,8 +3930,8 @@
/* Read remaining CBs if exists */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
desc, rsp.val, cb_idx, cbs_in_tb);
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 88b1104..f7c54be 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -3119,7 +3119,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
return -1;
@@ -3157,7 +3158,8 @@
struct rte_bbdev_enc_op *op;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3192,7 +3194,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + 1 > max_requested_ops)
return -1;
@@ -3208,7 +3211,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3220,7 +3224,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
@@ -3246,7 +3251,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3290,7 +3296,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3346,7 +3353,8 @@
uint32_t tb_crc_check = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3362,7 +3370,8 @@
/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.
* If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3372,7 +3381,8 @@
/* Read remaining CBs if exists. */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
rsp.val, desc->rsp.add_info_0,
@@ -3790,7 +3800,8 @@
struct rte_bbdev_fft_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4116,7 +4127,8 @@
uint8_t descs_in_op, i;
desc = acc_desc_tail(q, dequeued_ops);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4127,7 +4139,8 @@
/* Get last CB. */
last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1);
/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -4137,8 +4150,8 @@
for (i = 1; i < descs_in_op - 1; i++) {
last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
& q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit(
+ (uint64_t __rte_atomic *)last_desc, rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
}
@@ -4154,7 +4167,8 @@
for (i = 0; i < descs_in_op; i++) {
desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
--
1.8.3.1
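
The recurring change in both PMDs above is the descriptor-header load: the ring memory stays a plain array, so the atomic qualifier is added with a cast at the load site. A sketch of that cast pattern, using an illustrative descriptor layout rather than the real ACC one:

#include <stdint.h>
#include <rte_stdatomic.h>

union desc_hdr {                 /* illustrative, not the ACC descriptor */
    uint64_t atom_hdr;
    struct { uint32_t val; uint32_t rsvd; } rsp;
};

static inline union desc_hdr
desc_load_hdr(void *ring_slot)
{
    union desc_hdr d;

    /* The cast adds the __rte_atomic qualifier the generic expects;
     * the 64-bit header is then read in a single relaxed atomic load. */
    d.atom_hdr = rte_atomic_load_explicit(
        (uint64_t __rte_atomic *)ring_slot,
        rte_memory_order_relaxed);
    return d;
}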
* [PATCH 33/46] net/txgbe: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (31 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 32/46] baseband/acc: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 34/46] net/null: " Tyler Retzlaff
` (18 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 12 +++++++-----
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
3 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b75e889..a58f197 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -595,7 +595,7 @@ static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -2834,7 +2834,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev);
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -2859,7 +2859,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
rte_thread_detach(rte_thread_self());
txgbe_dev_setup_link_alarm_handler(dev);
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -2908,7 +2908,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
} else if (hw->phy.media_type == txgbe_media_type_fiber &&
dev->data->dev_conf.intr_conf.lsc != 0) {
txgbe_dev_wait_setup_link_complete(dev, 0);
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the TXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -2918,7 +2919,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
"txgbe-link",
txgbe_dev_setup_link_thread_handler, dev) < 0) {
PMD_DRV_LOG(ERR, "Create link thread failed!");
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 7e8067c..e8f55f7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -372,7 +372,7 @@ struct txgbe_adapter {
/* For RSS reta table update */
uint8_t rss_reta_updated;
- uint32_t link_thread_running;
+ RTE_ATOMIC(uint32_t) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index f1341fb..1abc190 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -206,7 +206,7 @@ static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->device_id = pci_dev->id.device_id;
--
1.8.3.1
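
Here __atomic_test_and_set() is replaced by an exchange on the whole uint32_t, whose return value plays the same role for this 0/1 flag, and __atomic_clear() becomes an atomic store of 0. A small sketch of that start/stop-flag pattern, with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) thread_running; /* illustrative flag */

/* Returns true if the caller won the race to start the worker. */
static inline bool
worker_try_start(void)
{
    /* exchange returns the previous value: 0 means nobody was running. */
    return rte_atomic_exchange_explicit(&thread_running, 1,
            rte_memory_order_seq_cst) == 0;
}

static inline void
worker_mark_stopped(void)
{
    rte_atomic_store_explicit(&thread_running, 0, rte_memory_order_seq_cst);
}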
* [PATCH 34/46] net/null: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (32 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 33/46] net/txgbe: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 35/46] event/dlb2: " Tyler Retzlaff
` (17 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/null/rte_eth_null.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 7c46004..f4ed3b8 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -37,8 +37,8 @@ struct null_queue {
struct rte_mempool *mb_pool;
struct rte_mbuf *dummy_packet;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_options {
@@ -102,7 +102,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -130,7 +130,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -155,7 +155,7 @@ struct pmd_internals {
rte_pktmbuf_free(bufs[i]);
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -178,7 +178,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
--
1.8.3.1
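
The counters become RTE_ATOMIC(uint64_t) so the datapath can add to them and the stats path can read them without a formal data race; the driver's NOTE about possibly relaxing the ordering is unchanged. A sketch of both sides, with illustrative names:

#include <stdint.h>
#include <rte_stdatomic.h>

struct q_stats {                    /* illustrative per-queue counters */
    RTE_ATOMIC(uint64_t) rx_pkts;
    RTE_ATOMIC(uint64_t) tx_pkts;
};

/* Datapath: account for a received burst. */
static inline void
stats_add_rx(struct q_stats *s, uint16_t n)
{
    rte_atomic_fetch_add_explicit(&s->rx_pkts, n, rte_memory_order_seq_cst);
}

/* Control path: snapshot the counter for stats_get(). */
static inline uint64_t
stats_read_rx(struct q_stats *s)
{
    return rte_atomic_load_explicit(&s->rx_pkts, rte_memory_order_relaxed);
}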
* [PATCH 35/46] event/dlb2: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (33 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 34/46] net/null: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 36/46] dma/idxd: " Tyler Retzlaff
` (16 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/dlb2/dlb2.c | 34 +++++++++++++++++-----------------
drivers/event/dlb2/dlb2_priv.h | 10 +++++-----
drivers/event/dlb2/dlb2_xstats.c | 2 +-
3 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 628ddef..0b91f03 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1005,7 +1005,7 @@ struct process_local_port_data
}
dlb2->new_event_limit = config->nb_events_limit;
- __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&dlb2->inflights, 0, rte_memory_order_seq_cst);
/* Save number of ports/queues for this event dev */
dlb2->num_ports = config->nb_event_ports;
@@ -2668,10 +2668,10 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
batch_size = credits;
if (likely(credits &&
- __atomic_compare_exchange_n(
+ rte_atomic_compare_exchange_strong_explicit(
qm_port->credit_pool[type],
- &credits, credits - batch_size, false,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+ &credits, credits - batch_size,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst)))
return batch_size;
else
return 0;
@@ -2687,7 +2687,7 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
/* Replenish credits, saving one quanta for enqueues */
uint16_t val = ev_port->inflight_credits - quanta;
- __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
ev_port->inflight_credits -= val;
}
}
@@ -2696,8 +2696,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_port *ev_port)
{
- uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
- __ATOMIC_SEQ_CST);
+ uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
+ rte_memory_order_seq_cst);
const int num = 1;
if (unlikely(ev_port->inflight_max < sw_inflights)) {
@@ -2719,8 +2719,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
return 1;
}
- __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
+ rte_memory_order_seq_cst);
ev_port->inflight_credits += (credit_update_quanta);
if (ev_port->inflight_credits < num) {
@@ -3234,17 +3234,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_ldb_credits += num;
if (qm_port->cached_ldb_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_LDB_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_ldb_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
@@ -3252,17 +3252,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_dir_credits += num;
if (qm_port->cached_dir_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_DIR_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_dir_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index 31a3bee..46883f2 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -348,7 +348,7 @@ struct dlb2_port {
uint32_t dequeue_depth;
enum dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
- uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) *credit_pool[DLB2_NUM_QUEUE_TYPES];
union {
struct {
uint16_t cached_ldb_credits;
@@ -586,7 +586,7 @@ struct dlb2_eventdev {
uint32_t xstats_count_mode_dev;
uint32_t xstats_count_mode_port;
uint32_t xstats_count;
- uint32_t inflights; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) inflights; /* use __atomic builtins */
uint32_t new_event_limit;
int max_num_events_override;
int num_dir_credits_override;
@@ -624,14 +624,14 @@ struct dlb2_eventdev {
uint16_t max_ldb_credits;
uint16_t max_dir_credits;
/* use __atomic builtins */ /* shared hw cred */
- uint32_t ldb_credit_pool __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) ldb_credit_pool __rte_cache_aligned;
/* use __atomic builtins */ /* shared hw cred */
- uint32_t dir_credit_pool __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) dir_credit_pool __rte_cache_aligned;
};
struct {
uint16_t max_credits;
/* use __atomic builtins */ /* shared hw cred */
- uint32_t credit_pool __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) credit_pool __rte_cache_aligned;
};
};
uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index ff15271..22094f3 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -173,7 +173,7 @@ struct dlb2_xstats_entry {
case nb_events_limit:
return dlb2->new_event_limit;
case inflight_events:
- return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
+ return rte_atomic_load_explicit(&dlb2->inflights, rte_memory_order_seq_cst);
case ldb_pool_size:
return dlb2->num_ldb_credits;
case dir_pool_size:
--
1.8.3.1
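
The credit pools above illustrate declaring a pointer to atomic storage (RTE_ATOMIC(uint32_t) *) and taking a batch with a single strong compare-and-swap, giving up rather than looping if the pool moved underneath. A rough sketch under those assumptions, with illustrative names:

#include <stdint.h>
#include <rte_stdatomic.h>

/* Try to take up to "want" credits from a shared pool; returns how many
 * were actually taken (0 if the pool was empty or the CAS lost a race). */
static inline uint32_t
credits_take(RTE_ATOMIC(uint32_t) *pool, uint32_t want)
{
    uint32_t avail = rte_atomic_load_explicit(pool, rte_memory_order_seq_cst);
    uint32_t take = avail < want ? avail : want;

    if (take == 0)
        return 0;
    /* On failure "avail" is refreshed with the current pool value. */
    if (rte_atomic_compare_exchange_strong_explicit(pool, &avail,
            avail - take,
            rte_memory_order_seq_cst, rte_memory_order_seq_cst))
        return take;
    return 0;
}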
* [PATCH 36/46] dma/idxd: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (34 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 35/46] event/dlb2: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 37/46] crypto/ccp: " Tyler Retzlaff
` (15 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index cd41777..537cf9b 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -33,7 +33,7 @@ struct idxd_pci_common {
rte_spinlock_t lk;
uint8_t wq_cfg_sz;
- uint16_t ref_count;
+ RTE_ATOMIC(uint16_t) ref_count;
volatile struct rte_idxd_bar0 *regs;
volatile uint32_t *wq_regs_base;
volatile struct rte_idxd_grpcfg *grp_regs;
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index a78889a..06fa115 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -136,7 +136,8 @@
* the PCI struct
*/
/* NOTE: review for potential ordering optimization */
- is_last_wq = (__atomic_fetch_sub(&idxd->u.pci->ref_count, 1, __ATOMIC_SEQ_CST) == 1);
+ is_last_wq = (rte_atomic_fetch_sub_explicit(&idxd->u.pci->ref_count, 1,
+ rte_memory_order_seq_cst) == 1);
if (is_last_wq) {
/* disable the device */
err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -330,9 +331,9 @@
return ret;
}
qid = rte_dma_get_dev_id_by_name(qname);
- max_qid = __atomic_load_n(
+ max_qid = rte_atomic_load_explicit(
&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
- __ATOMIC_SEQ_CST);
+ rte_memory_order_seq_cst);
/* we have queue 0 done, now configure the rest of the queues */
for (qid = 1; qid < max_qid; qid++) {
@@ -389,7 +390,7 @@
free(idxd.u.pci);
return ret;
}
- __atomic_fetch_add(&idxd.u.pci->ref_count, 1, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&idxd.u.pci->ref_count, 1, rte_memory_order_seq_cst);
}
return 0;
--
1.8.3.1
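
Same previous-value idiom as the reference counts earlier in the series, just spelled differently: fetch_sub returns the value before the decrement, so comparing the result against 1 (the "== 1" above) is equivalent to the "- 1 == 0" form used elsewhere. A minimal sketch with an illustrative type:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct shared_pci {                  /* illustrative shared device state */
    RTE_ATOMIC(uint16_t) ref_count;
};

/* Returns true when the caller released the last work queue and the
 * shared device itself should be torn down. */
static inline bool
shared_pci_put(struct shared_pci *p)
{
    return rte_atomic_fetch_sub_explicit(&p->ref_count, 1,
            rte_memory_order_seq_cst) == 1;
}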
* [PATCH 37/46] crypto/ccp: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (35 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 36/46] dma/idxd: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 38/46] common/cpt: " Tyler Retzlaff
` (14 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/crypto/ccp/ccp_dev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index b7ca3af..41c1422 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -116,15 +116,15 @@ struct ccp_queue *
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ (1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_and_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ ~(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline uint32_t
--
1.8.3.1
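
The bitmap itself stays an array of plain unsigned long, so, as in several other patches in this series, the atomic qualifier is added with a cast at the call site. A self-contained sketch of the set/clear helpers, with generic names rather than the ccp ones:

#include <limits.h>
#include <rte_stdatomic.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static inline void
atomic_bitmap_set(unsigned long *bitmap, unsigned int n)
{
    rte_atomic_fetch_or_explicit(
        (unsigned long __rte_atomic *)&bitmap[n / BITS_PER_LONG],
        1UL << (n % BITS_PER_LONG), rte_memory_order_seq_cst);
}

static inline void
atomic_bitmap_clear(unsigned long *bitmap, unsigned int n)
{
    rte_atomic_fetch_and_explicit(
        (unsigned long __rte_atomic *)&bitmap[n / BITS_PER_LONG],
        ~(1UL << (n % BITS_PER_LONG)), rte_memory_order_seq_cst);
}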
* [PATCH 38/46] common/cpt: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (36 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 37/46] crypto/ccp: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 39/46] bus/vmbus: " Tyler Retzlaff
` (13 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/common/cpt/cpt_common.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index d70668a..dc79e3a 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -73,7 +73,7 @@ struct cpt_request_info {
const unsigned int qsize)
{
/* Ensure ordering between setting the entry and updating the tail */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
q->tail = (q->tail + cnt) & (qsize - 1);
}
--
1.8.3.1
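
Only the fence spelling changes here, but the pairing is worth spelling out: the release fence orders the descriptor writes before the tail update, and the consumer is expected to pair it with an acquire fence or load before reading the entries. A sketch of the producer side under that assumption, with an illustrative ring:

#include <stdint.h>
#include <rte_stdatomic.h>

#define RING_SZ 256                   /* illustrative, power of two */

struct pend_ring {
    void *entry[RING_SZ];
    volatile uint32_t tail;           /* polled by the consumer */
};

static inline void
pend_ring_push(struct pend_ring *r, void *req)
{
    r->entry[r->tail & (RING_SZ - 1)] = req;
    /* Make the entry visible before the new tail can be observed. */
    rte_atomic_thread_fence(rte_memory_order_release);
    r->tail = r->tail + 1;
}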
* [PATCH 39/46] bus/vmbus: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (37 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 38/46] common/cpt: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 40/46] examples: " Tyler Retzlaff
` (12 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/bus/vmbus/vmbus_channel.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index 4d74df3..e96a4eb 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -19,22 +19,23 @@
#include "private.h"
static inline void
-vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+vmbus_sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
/* Use GCC builtin which atomic does atomic OR operation */
- __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}
static inline void
vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
{
- uint32_t *monitor_addr, monitor_mask;
+ RTE_ATOMIC(uint32_t) *monitor_addr, monitor_mask;
unsigned int trigger_index;
trigger_index = monitor_id / HV_MON_TRIG_LEN;
monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
- monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;
+ monitor_addr =
+ (uint32_t __rte_atomic *)&channel->monitor_page->trigs[trigger_index].pending;
vmbus_sync_set_bit(monitor_addr, monitor_mask);
}
--
1.8.3.1
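
Two small things happen in this patch: the helper's parameter gains the atomic qualifier (volatile RTE_ATOMIC(uint32_t) *), and the caller adds that qualifier with a cast because the shared monitor page keeps plain uint32_t fields. A sketch of the same shape with generic names:

#include <stdint.h>
#include <rte_stdatomic.h>

static inline void
sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
    rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}

/* Illustrative caller: the shared page field is a plain uint32_t, so the
 * atomic qualifier is added at the point of use. */
static inline void
trigger_monitor(uint32_t *pending, uint32_t monitor_id)
{
    sync_set_bit((uint32_t __rte_atomic *)pending,
        1u << (monitor_id % 32));
}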
* [PATCH 40/46] examples: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (38 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 39/46] bus/vmbus: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 41/46] app/dumpcap: " Tyler Retzlaff
` (11 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
examples/bbdev_app/main.c | 13 +++++----
examples/l2fwd-event/l2fwd_common.h | 4 +--
examples/l2fwd-event/l2fwd_event.c | 24 ++++++++--------
examples/l2fwd-jobstats/main.c | 11 ++++----
.../client_server_mp/mp_server/main.c | 6 ++--
examples/server_node_efd/efd_server/main.c | 6 ++--
examples/vhost/main.c | 32 +++++++++++-----------
examples/vhost/main.h | 4 +--
examples/vhost/virtio_net.c | 13 +++++----
examples/vhost_blk/vhost_blk.c | 8 +++---
examples/vm_power_manager/channel_monitor.c | 9 +++---
11 files changed, 68 insertions(+), 62 deletions(-)
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 16599ae..214fdf2 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -165,7 +165,7 @@ struct stats_lcore_params {
.num_dec_cores = 1,
};
-static uint16_t global_exit_flag;
+static RTE_ATOMIC(uint16_t) global_exit_flag;
/* display usage */
static inline void
@@ -277,7 +277,7 @@ uint16_t bbdev_parse_number(const char *mask)
signal_handler(int signum)
{
printf("\nSignal %d received\n", signum);
- __atomic_store_n(&global_exit_flag, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 1, rte_memory_order_relaxed);
}
static void
@@ -321,7 +321,8 @@ uint16_t bbdev_parse_number(const char *mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME &&
- !__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED); count++) {
+ !rte_atomic_load_explicit(&global_exit_flag,
+ rte_memory_order_relaxed); count++) {
memset(&link, 0, sizeof(link));
link_get_err = rte_eth_link_get_nowait(port_id, &link);
@@ -675,7 +676,7 @@ uint16_t bbdev_parse_number(const char *mask)
{
struct stats_lcore_params *stats_lcore = arg;
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
print_stats(stats_lcore);
rte_delay_ms(500);
}
@@ -921,7 +922,7 @@ uint16_t bbdev_parse_number(const char *mask)
const bool run_decoder = (lcore_conf->core_type &
(1 << RTE_BBDEV_OP_TURBO_DEC));
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
if (run_encoder)
run_encoding(lcore_conf);
if (run_decoder)
@@ -1055,7 +1056,7 @@ uint16_t bbdev_parse_number(const char *mask)
.align = alignof(struct rte_mbuf *),
};
- __atomic_store_n(&global_exit_flag, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 0, rte_memory_order_relaxed);
sigret = signal(SIGTERM, signal_handler);
if (sigret == SIG_ERR)
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index 07f84cb..3d2e303 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -61,8 +61,8 @@
/* Per-port statistics struct */
struct l2fwd_port_statistics {
uint64_t dropped;
- uint64_t tx;
- uint64_t rx;
+ RTE_ATOMIC(uint64_t) tx;
+ RTE_ATOMIC(uint64_t) rx;
} __rte_cache_aligned;
/* Event vector attributes */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 4b5a032..2247202 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -163,8 +163,8 @@
dst_port = rsrc->dst_ports[mbuf->port];
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].rx,
+ 1, rte_memory_order_relaxed);
mbuf->port = dst_port;
if (flags & L2FWD_EVENT_UPDT_MAC)
@@ -179,8 +179,8 @@
rte_event_eth_tx_adapter_txq_set(mbuf, 0);
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].tx,
+ 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -367,8 +367,8 @@
vec->queue = 0;
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbufs[0]->port].rx,
+ vec->nb_elem, rte_memory_order_relaxed);
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (j < vec->nb_elem)
@@ -382,14 +382,14 @@
}
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[vec->port].tx,
+ vec->nb_elem, rte_memory_order_relaxed);
} else {
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].rx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (j < vec->nb_elem)
rte_prefetch0(
@@ -406,9 +406,9 @@
rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].tx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
}
}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 2653db4..9a094ef 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -80,7 +80,7 @@ struct lcore_queue_conf {
struct rte_jobstats idle_job;
struct rte_jobstats_context jobs_context;
- uint16_t stats_read_pending;
+ RTE_ATOMIC(uint16_t) stats_read_pending;
rte_spinlock_t lock;
} __rte_cache_aligned;
/* >8 End of list of queues to be polled for given lcore. */
@@ -151,9 +151,9 @@ struct l2fwd_port_statistics {
uint64_t collection_time = rte_get_timer_cycles();
/* Ask forwarding thread to give us stats. */
- __atomic_store_n(&qconf->stats_read_pending, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&qconf->lock);
- __atomic_store_n(&qconf->stats_read_pending, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);
/* Collect context statistics. */
stats_period = ctx->state_time - ctx->start_time;
@@ -522,8 +522,9 @@ struct l2fwd_port_statistics {
repeats++;
need_manage = qconf->flush_timer.expire < now;
/* Check if we was esked to give a stats. */
- stats_read_pending = __atomic_load_n(&qconf->stats_read_pending,
- __ATOMIC_RELAXED);
+ stats_read_pending = rte_atomic_load_explicit(
+ &qconf->stats_read_pending,
+ rte_memory_order_relaxed);
need_manage |= stats_read_pending;
for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index f54bb8b..ebfc2fe 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -157,12 +157,12 @@ struct client_rx_buf {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
uint32_t status = 0;
/* Only one core should display stats */
- if (__atomic_compare_exchange_n(&display_stats, &status, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &status, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/server_node_efd/efd_server/main.c b/examples/server_node_efd/efd_server/main.c
index fd72882..75ff0ea 100644
--- a/examples/server_node_efd/efd_server/main.c
+++ b/examples/server_node_efd/efd_server/main.c
@@ -177,12 +177,12 @@ struct efd_stats {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
/* Only one core should display stats */
uint32_t display_init = 0;
- if (__atomic_compare_exchange_n(&display_stats, &display_init, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &display_init, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned int sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3fc1b15..4391d88 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1052,10 +1052,10 @@ static unsigned check_ports_num(unsigned nb_ports)
}
if (enable_stats) {
- __atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_total_atomic, 1,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
src_vdev->stats.tx_total++;
src_vdev->stats.tx += ret;
}
@@ -1072,10 +1072,10 @@ static unsigned check_ports_num(unsigned nb_ports)
ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, nr_xmit,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1404,10 +1404,10 @@ static void virtio_tx_offload(struct rte_mbuf *m)
}
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, rx_count,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, enqueue_count,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1832,10 +1832,10 @@ uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
- __ATOMIC_SEQ_CST);
- rx = __atomic_load_n(&vdev->stats.rx_atomic,
- __ATOMIC_SEQ_CST);
+ rx_total = rte_atomic_load_explicit(&vdev->stats.rx_total_atomic,
+ rte_memory_order_seq_cst);
+ rx = rte_atomic_load_explicit(&vdev->stats.rx_atomic,
+ rte_memory_order_seq_cst);
rx_dropped = rx_total - rx;
printf("Statistics for device %d\n"
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 2fcb837..b163955 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -22,8 +22,8 @@
struct device_statistics {
uint64_t tx;
uint64_t tx_total;
- uint64_t rx_atomic;
- uint64_t rx_total_atomic;
+ RTE_ATOMIC(uint64_t) rx_atomic;
+ RTE_ATOMIC(uint64_t) rx_total_atomic;
};
struct vhost_queue {
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 514c8e0..55af6e7 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -198,7 +198,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
+ avail_idx = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire);
start_idx = queue->last_used_idx;
free_entries = avail_idx - start_idx;
count = RTE_MIN(count, free_entries);
@@ -231,7 +232,8 @@
rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
}
- __atomic_fetch_add(&vr->used->idx, count, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, count,
+ rte_memory_order_release);
queue->last_used_idx += count;
rte_vhost_vring_call(dev->vid, queue_id);
@@ -386,8 +388,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
- queue->last_avail_idx;
+ free_entries = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire) - queue->last_avail_idx;
if (free_entries == 0)
return 0;
@@ -442,7 +444,8 @@
queue->last_avail_idx += i;
queue->last_used_idx += i;
- __atomic_fetch_add(&vr->used->idx, i, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, i,
+ rte_memory_order_acq_rel);
rte_vhost_vring_call(dev->vid, queue_id);
diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
index 376f7b8..03f1ac9 100644
--- a/examples/vhost_blk/vhost_blk.c
+++ b/examples/vhost_blk/vhost_blk.c
@@ -85,9 +85,9 @@ struct vhost_blk_ctrlr *
*/
used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
used->idx++;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
vq->id, used->idx, task->req_idx);
@@ -111,12 +111,12 @@ struct vhost_blk_ctrlr *
desc->id = task->buffer_id;
desc->addr = 0;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
if (vq->used_wrap_counter)
desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
else
desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
task->inflight_idx);
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5fef268..d384c86 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -828,8 +828,9 @@ void channel_monitor_exit(void)
return -1;
uint32_t channel_connected = CHANNEL_MGR_CHANNEL_CONNECTED;
- if (__atomic_compare_exchange_n(&(chan_info->status), &channel_connected,
- CHANNEL_MGR_CHANNEL_PROCESSING, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_connected,
+ CHANNEL_MGR_CHANNEL_PROCESSING, rte_memory_order_relaxed,
+ rte_memory_order_relaxed) == 0)
return -1;
if (pkt->command == RTE_POWER_CPU_POWER) {
@@ -934,8 +935,8 @@ void channel_monitor_exit(void)
* from management thread
*/
uint32_t channel_processing = CHANNEL_MGR_CHANNEL_PROCESSING;
- __atomic_compare_exchange_n(&(chan_info->status), &channel_processing,
- CHANNEL_MGR_CHANNEL_CONNECTED, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_processing,
+ CHANNEL_MGR_CHANNEL_CONNECTED, rte_memory_order_relaxed, rte_memory_order_relaxed);
return 0;
}
--
1.8.3.1
* [PATCH 41/46] app/dumpcap: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (39 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 40/46] examples: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 42/46] app/test: " Tyler Retzlaff
` (10 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/dumpcap/main.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
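The dumpcap change is the simplest form of the conversion: a signal handler publishes a quit flag and the main loop polls it with relaxed ordering. A hedged, self-contained sketch of that shape follows; the loop body is illustrative only.

#include <signal.h>
#include <stdbool.h>
#include <rte_stdatomic.h>

/* was: static bool quit_signal; */
static RTE_ATOMIC(bool) quit_signal;

static void
signal_handler(int sig_num)
{
	(void)sig_num;
	/* was: __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED); */
	rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}

static void
main_loop(void)
{
	while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
		/* process packets; relaxed ordering suffices because only
		 * the flag itself is communicated between handler and loop */
	}
}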
diff --git a/app/dumpcap/main.c b/app/dumpcap/main.c
index cc0f66b..b25b95e 100644
--- a/app/dumpcap/main.c
+++ b/app/dumpcap/main.c
@@ -51,7 +51,7 @@
/* command line flags */
static const char *progname;
-static bool quit_signal;
+static RTE_ATOMIC(bool) quit_signal;
static bool group_read;
static bool quiet;
static bool use_pcapng = true;
@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv)
static void
signal_handler(int sig_num __rte_unused)
{
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
@@ -490,7 +490,7 @@ static void statistics_loop(void)
printf("%-15s %10s %10s\n",
"Interface", "Received", "Dropped");
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
RTE_ETH_FOREACH_DEV(p) {
if (rte_eth_dev_get_name_by_port(p, name) < 0)
continue;
@@ -528,7 +528,7 @@ static void statistics_loop(void)
static void
monitor_primary(void *arg __rte_unused)
{
- if (__atomic_load_n(&quit_signal, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed))
return;
if (rte_eal_primary_proc_alive(NULL)) {
@@ -536,7 +536,7 @@ static void statistics_loop(void)
} else {
fprintf(stderr,
"Primary process is no longer active, exiting...\n");
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
}
@@ -983,7 +983,7 @@ int main(int argc, char **argv)
show_count(0);
}
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
if (process_ring(out, r) < 0) {
fprintf(stderr, "pcapng file write failed; %s\n",
strerror(errno));
--
1.8.3.1
* [PATCH 42/46] app/test: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (40 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 41/46] app/dumpcap: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 43/46] app/test-eventdev: " Tyler Retzlaff
` (9 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/test/test_bpf.c | 46 ++++++++-----
app/test/test_distributor.c | 114 ++++++++++++++++-----------------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++++----
app/test/test_hash_multiwriter.c | 16 ++---
app/test/test_hash_readwrite.c | 74 ++++++++++-----------
app/test/test_hash_readwrite_lf_perf.c | 88 ++++++++++++-------------
app/test/test_lcores.c | 25 ++++----
app/test/test_lpm_perf.c | 14 ++--
app/test/test_mcslock.c | 12 ++--
app/test/test_mempool_perf.c | 9 +--
app/test/test_pflock.c | 13 ++--
app/test/test_pmd_perf.c | 10 +--
app/test/test_rcu_qsbr_perf.c | 114 +++++++++++++++++----------------
app/test/test_ring_perf.c | 11 ++--
app/test/test_ring_stress_impl.h | 10 +--
app/test/test_rwlock.c | 9 +--
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +++----
app/test/test_spinlock.c | 9 +--
app/test/test_stack_perf.c | 12 ++--
app/test/test_threads.c | 33 +++++-----
app/test/test_ticketlock.c | 9 +--
app/test/test_timer.c | 31 +++++----
24 files changed, 378 insertions(+), 343 deletions(-)
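Several of the test conversions below rely on casts rather than (or in addition to) retyping, either because the atomic access targets a field whose layout is fixed elsewhere or because a legacy helper such as rte_wait_until_equal_32() still takes a plain pointer. A hedged sketch of both cast shapes, with illustrative names (fixed_ring, synchro), under those assumptions:

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

/* layout fixed by an external definition, so the field stays plain */
struct fixed_ring {
	uint16_t idx;
};

static RTE_ATOMIC(uint32_t) synchro;

static uint16_t
load_idx(struct fixed_ring *r)
{
	/* qualify the pointer at the point of use instead of retyping */
	return rte_atomic_load_explicit((uint16_t __rte_atomic *)&r->idx,
			rte_memory_order_acquire);
}

static void
wait_for_start(void)
{
	/* the helper still takes a plain uint32_t *, so strip the atomic
	 * qualifier through uintptr_t, as the tests do */
	rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
			rte_memory_order_relaxed);
}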
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 53e3a31..2e43442 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -39,8 +39,8 @@
*/
struct dummy_offset {
- uint64_t u64;
- uint32_t u32;
+ RTE_ATOMIC(uint64_t) u64;
+ RTE_ATOMIC(uint32_t) u32;
uint16_t u16;
uint8_t u8;
};
@@ -1581,32 +1581,46 @@ struct bpf_test {
memset(&dfe, 0, sizeof(dfe));
rv = 1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = -1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = (int32_t)TEST_FILL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_3;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}
diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index d2037b7..df871e3 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -47,14 +47,14 @@ struct worker_params {
struct worker_params worker_params;
/* statics - all zero-initialized by default */
-static volatile int quit; /**< general quit variable for all threads */
-static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
-static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
-static volatile unsigned worker_idx;
-static volatile unsigned zero_idx;
+static volatile RTE_ATOMIC(int) quit; /**< general quit variable for all threads */
+static volatile RTE_ATOMIC(int) zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile RTE_ATOMIC(int) zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
+static volatile RTE_ATOMIC(unsigned int) zero_idx;
struct worker_stats {
- volatile unsigned handled_packets;
+ volatile RTE_ATOMIC(unsigned int) handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];
@@ -66,8 +66,8 @@ struct worker_stats {
{
unsigned i, count = 0;
for (i = 0; i < worker_idx; i++)
- count += __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED);
+ count += rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed);
return count;
}
@@ -77,8 +77,8 @@ struct worker_stats {
{
unsigned int i;
for (i = 0; i < RTE_MAX_LCORE; i++)
- __atomic_store_n(&worker_stats[i].handled_packets, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&worker_stats[i].handled_packets, 0,
+ rte_memory_order_relaxed);
}
/* this is the basic worker function for sanity test
@@ -91,17 +91,17 @@ struct worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -162,8 +162,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with all zero hashes done.\n");
/* pick two flows and check they go correctly */
@@ -189,9 +189,9 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(
+ rte_atomic_load_explicit(
&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_memory_order_relaxed));
printf("Sanity test with two hash values done\n");
}
@@ -218,8 +218,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with non-zero hashes done\n");
rte_mempool_put_bulk(p, (void *)bufs, BURST);
@@ -311,18 +311,18 @@ struct worker_stats {
struct rte_distributor *d = wp->dist;
unsigned int i;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
rte_pktmbuf_free(buf[i]);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(d, id, buf, num);
return 0;
}
@@ -381,51 +381,51 @@ struct worker_stats {
unsigned int num;
unsigned int zero_id = 0;
unsigned int zero_unset;
- const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
- __ATOMIC_RELAXED);
+ const unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
/* wait for quit single globally, or for worker zero, wait
* for zero_quit */
while (!quit && !(id == zero_id && zero_quit)) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
if (id == zero_id) {
rte_distributor_return_pkt(d, id, NULL, 0);
/* for worker zero, allow it to restart to pick up last packet
* when all workers are shutting down.
*/
- __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 1, rte_memory_order_release);
while (zero_quit)
usleep(100);
- __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 0, rte_memory_order_release);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets,
- num, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets,
+ num, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
}
@@ -491,17 +491,17 @@ struct worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST * 2) {
printf("Line %d: Error, not all packets flushed. "
@@ -560,18 +560,18 @@ struct worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
@@ -596,18 +596,18 @@ struct worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num, i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
*seq_field(buf[i]) += id + 1;
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -679,8 +679,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
/* Sort returned packets by sent order (sequence numbers). */
for (i = 0; i < buf_count; i++) {
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index ca86845..ba3cf26 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -31,7 +31,7 @@
/* static vars - zero initialized by default */
static volatile int quit;
-static volatile unsigned worker_idx;
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
struct worker_stats {
volatile unsigned handled_packets;
@@ -121,7 +121,7 @@ struct worker_stats {
struct rte_distributor *d = arg;
unsigned int num = 0;
int i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
struct rte_mbuf *buf[8] __rte_cache_aligned;
for (i = 0; i < 8; i++)
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 9296de2..bae39af 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -53,12 +53,13 @@
#define MAX_LCORES (rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))
-static uint32_t obj_count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) obj_count;
+static RTE_ATOMIC(uint32_t) synchro;
#define WAIT_SYNCHRO_FOR_WORKERS() do { \
if (lcore_self != rte_get_main_lcore()) \
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, \
+ rte_memory_order_relaxed); \
} while(0)
/*
@@ -71,7 +72,8 @@
WAIT_SYNCHRO_FOR_WORKERS();
- __atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silent the check in the caller */
+ /* silent the check in the caller */
+ rte_atomic_store_explicit(&obj_count, 1, rte_memory_order_relaxed);
if (rte_eal_init(0, NULL) != -1)
return -1;
@@ -113,7 +115,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
if (rp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -178,7 +180,7 @@
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -244,7 +246,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_hash_create(&hash_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple times simultaneously */
@@ -311,7 +313,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_fbk_hash_create(&fbk_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -376,7 +378,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
if (lpm != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -437,8 +439,8 @@ struct test_case test_cases[] = {
if (pt_case->func == NULL)
return -1;
- __atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&obj_count, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -448,7 +450,7 @@ struct test_case test_cases[] = {
rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
}
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
@@ -463,7 +465,7 @@ struct test_case test_cases[] = {
pt_case->clean(lcore_id);
}
- count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
+ count = rte_atomic_load_explicit(&obj_count, rte_memory_order_relaxed);
if (count != 1) {
printf("%s: common object allocated %d times (should be 1)\n",
pt_case->name, count);
diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
index ed9dd41..33d3147 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/app/test/test_hash_multiwriter.c
@@ -43,8 +43,8 @@ struct {
const uint32_t nb_total_tsx_insertion = 4.5*1024*1024;
uint32_t rounded_nb_total_tsx_insertion;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
static int use_htm;
@@ -84,8 +84,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)
tbl_multiwriter_test_params.keys[i]
@@ -166,8 +166,8 @@ struct {
tbl_multiwriter_test_params.found = found;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
/* Get list of enabled cores */
i = 0;
@@ -233,8 +233,8 @@ struct {
printf("No key corrupted during multiwriter insertion.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED)/
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed)/
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf(" cycles per insertion: %llu\n", cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 4997a01..1867376 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -45,14 +45,14 @@ struct {
struct rte_hash *h;
} tbl_rw_test_param;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
-static uint64_t gread_cycles;
-static uint64_t gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
-static uint64_t greads;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrites;
static int
test_hash_readwrite_worker(__rte_unused void *arg)
@@ -110,8 +110,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_rw_test_param.num_insert; i++)
tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
@@ -209,8 +209,8 @@ struct {
int worker_cnt = rte_lcore_count() - 1;
uint32_t tot_insert = 0;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
if (init_params(use_ext, use_htm, use_rw_lf, use_jhash) != 0)
goto err;
@@ -269,8 +269,8 @@ struct {
printf("No key corrupted during read-write test.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
@@ -310,8 +310,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, i, rte_memory_order_relaxed);
return 0;
}
@@ -344,9 +344,9 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rw_test_param.num_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rw_test_param.num_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -369,11 +369,11 @@ struct {
uint64_t start = 0, end = 0;
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
if (init_params(0, use_htm, 0, use_jhash) != 0)
goto err;
@@ -430,10 +430,10 @@ struct {
if (tot_worker_lcore < core_cnt[n] * 2)
goto finish;
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -475,8 +475,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_only[n] = cycles_per_insertion;
printf("Reader only: cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -484,17 +484,17 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->write_only[n] = cycles_per_insertion;
printf("Writer only: cycles per writes: %llu\n",
cycles_per_insertion);
}
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -569,8 +569,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_write_r[n] = cycles_per_insertion;
printf("Read-write cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -578,8 +578,8 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->read_write_w[n] = cycles_per_insertion;
printf("Read-write cycles per writes: %llu\n",
cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite_lf_perf.c b/app/test/test_hash_readwrite_lf_perf.c
index 5d18850..4523985 100644
--- a/app/test/test_hash_readwrite_lf_perf.c
+++ b/app/test/test_hash_readwrite_lf_perf.c
@@ -86,10 +86,10 @@ struct rwc_perf {
struct rte_hash *h;
} tbl_rwc_test_param;
-static uint64_t gread_cycles;
-static uint64_t greads;
-static uint64_t gwrite_cycles;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gwrites;
static volatile uint8_t writer_done;
@@ -651,8 +651,8 @@ struct rwc_perf {
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, read_cnt*loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, read_cnt*loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -724,8 +724,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -742,8 +742,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_hit[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -791,8 +791,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -811,8 +811,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_miss[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -861,8 +861,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -884,8 +884,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_nsp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -935,8 +935,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -958,8 +958,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_sp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1007,8 +1007,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1030,8 +1030,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
}
@@ -1087,9 +1087,9 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n",
rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0,
+ rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1127,10 +1127,10 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles,
- __ATOMIC_RELAXED) /
- __atomic_load_n(&greads,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles,
+ rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads,
+ rte_memory_order_relaxed);
rwc_perf_results->multi_rw[m][k][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n",
@@ -1178,8 +1178,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
write_type = WRITE_NO_KEY_SHIFT;
@@ -1210,8 +1210,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_extbkt[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1280,9 +1280,9 @@ struct rwc_perf {
tbl_rwc_test_param.keys_no_ks + i);
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rwc_test_param.single_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -1328,8 +1328,8 @@ struct rwc_perf {
rwc_core_cnt[n];
printf("\nNumber of writers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
rte_rcu_qsbr_init(rv, RTE_MAX_LCORE);
@@ -1364,8 +1364,8 @@ struct rwc_perf {
rte_eal_mp_wait_lcore();
unsigned long long cycles_per_write_operation =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
rwc_perf_results->writer_add_del[n]
= cycles_per_write_operation;
printf("Cycles per write operation: %llu\n",
diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 3434a0d..bd5c0dd 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -10,6 +10,7 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_thread.h>
+#include <rte_stdatomic.h>
#include "test.h"
@@ -25,7 +26,7 @@ struct thread_context {
enum { Thread_INIT, Thread_ERROR, Thread_DONE } state;
bool lcore_id_any;
rte_thread_t id;
- unsigned int *registered_count;
+ RTE_ATOMIC(unsigned int) *registered_count;
};
static uint32_t thread_loop(void *arg)
@@ -49,10 +50,10 @@ static uint32_t thread_loop(void *arg)
t->state = Thread_ERROR;
}
/* Report register happened to the control thread. */
- __atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(t->registered_count, 1, rte_memory_order_release);
/* Wait for release from the control thread. */
- while (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(t->registered_count, rte_memory_order_acquire) != 0)
sched_yield();
rte_thread_unregister();
lcore_id = rte_lcore_id();
@@ -73,7 +74,7 @@ static uint32_t thread_loop(void *arg)
{
struct thread_context thread_contexts[RTE_MAX_LCORE];
unsigned int non_eal_threads_count;
- unsigned int registered_count;
+ RTE_ATOMIC(unsigned int) registered_count;
struct thread_context *t;
unsigned int i;
int ret;
@@ -93,7 +94,7 @@ static uint32_t thread_loop(void *arg)
}
printf("non-EAL threads count: %u\n", non_eal_threads_count);
/* Wait all non-EAL threads to register. */
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
@@ -109,14 +110,14 @@ static uint32_t thread_loop(void *arg)
if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
non_eal_threads_count++;
printf("non-EAL threads count: %u\n", non_eal_threads_count);
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
}
skip_lcore_any:
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -225,7 +226,7 @@ struct limit_lcore_context {
struct thread_context thread_contexts[2];
unsigned int non_eal_threads_count = 0;
struct limit_lcore_context l[2] = {};
- unsigned int registered_count = 0;
+ RTE_ATOMIC(unsigned int) registered_count = 0;
struct thread_context *t;
void *handle[2] = {};
unsigned int i;
@@ -275,7 +276,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 1 ||
@@ -298,7 +299,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 2 ||
@@ -315,7 +316,7 @@ struct limit_lcore_context {
}
rte_lcore_dump(stdout);
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -337,7 +338,7 @@ struct limit_lcore_context {
cleanup_threads:
/* Release all threads */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
rte_thread_join(t->id, NULL);
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
index 82daf9e..bc4bdde 100644
--- a/app/test/test_lpm_perf.c
+++ b/app/test/test_lpm_perf.c
@@ -22,8 +22,8 @@
struct rte_lpm *lpm;
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
-static volatile uint32_t thr_id;
-static uint64_t gwrite_cycles;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
static uint32_t num_writers;
/* LPM APIs are not thread safe, use spinlock */
@@ -362,7 +362,7 @@ static void generate_large_route_rule_table(void)
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -470,7 +470,7 @@ static void generate_large_route_rule_table(void)
total_cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, total_cycles, rte_memory_order_relaxed);
return 0;
@@ -540,9 +540,9 @@ static void generate_large_route_rule_table(void)
reader_f = test_lpm_reader;
writer_done = 0;
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
/* Launch reader threads */
for (i = j; i < num_cores; i++)
@@ -563,7 +563,7 @@ static void generate_large_route_rule_table(void)
printf("Total LPM Adds: %d\n", TOTAL_WRITES);
printf("Total LPM Deletes: %d\n", TOTAL_WRITES);
printf("Average LPM Add/Del: %"PRIu64" cycles\n",
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed)
/ TOTAL_WRITES);
writer_done = 1;
diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index 46ff13c..8fcbc11 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -42,7 +42,7 @@
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_mcslock_per_core(__rte_unused void *arg)
@@ -75,7 +75,7 @@
rte_mcslock_t ml_perf_me;
/* wait synchro */
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -100,14 +100,14 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lock = 1;
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
@@ -116,11 +116,11 @@
printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index 96de347..35f0597 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -88,7 +88,7 @@
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
@@ -188,7 +188,8 @@ struct mempool_test_stats {
/* wait synchro for workers */
if (lcore_id != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
start_cycles = rte_get_timer_cycles();
@@ -233,7 +234,7 @@ struct mempool_test_stats {
int ret;
unsigned cores_save = cores;
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
/* reset stats */
memset(stats, 0, sizeof(stats));
@@ -258,7 +259,7 @@ struct mempool_test_stats {
}
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
ret = per_lcore_mempool_test(mp);
diff --git a/app/test/test_pflock.c b/app/test/test_pflock.c
index 5f77b15..d989a68 100644
--- a/app/test/test_pflock.c
+++ b/app/test/test_pflock.c
@@ -31,7 +31,7 @@
static rte_pflock_t sl;
static rte_pflock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_pflock_per_core(__rte_unused void *arg)
@@ -69,7 +69,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -99,7 +100,7 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -107,7 +108,7 @@
printf("\nTest with phase-fair lock on single core...\n");
lock = 1;
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -116,12 +117,12 @@
printf("\nPhase-fair test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index f6d97f2..46ae80d 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -537,7 +537,7 @@ enum {
return 0;
}
-static uint64_t start;
+static RTE_ATOMIC(uint64_t) start;
static inline int
poll_burst(void *args)
@@ -575,7 +575,7 @@ enum {
num[portid] = pkt_per_port;
}
- rte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_64((uint64_t *)(uintptr_t)&start, 1, rte_memory_order_acquire);
cur_tsc = rte_rdtsc();
while (total) {
@@ -629,9 +629,9 @@ enum {
/* only when polling first */
if (flags == SC_BURST_POLL_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_relaxed);
else
- __atomic_store_n(&start, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 0, rte_memory_order_relaxed);
/* start polling thread
* if in POLL_FIRST mode, poll once launched;
@@ -655,7 +655,7 @@ enum {
/* only when polling second */
if (flags == SC_BURST_XMIT_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_release);
/* wait for polling finished */
diff_tsc = rte_eal_wait_lcore(lcore);
diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index ce88a73..d1bf5c5 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -25,13 +25,15 @@
static uint32_t *hash_data[TOTAL_ENTRY];
static volatile uint8_t writer_done;
static volatile uint8_t all_registered;
-static volatile uint32_t thr_id;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
static struct rte_hash *h;
static char hash_name[8];
-static uint64_t updates, checks;
-static uint64_t update_cycles, check_cycles;
+static RTE_ATOMIC(uint64_t) updates;
+static RTE_ATOMIC(uint64_t) checks;
+static RTE_ATOMIC(uint64_t) update_cycles;
+static RTE_ATOMIC(uint64_t) check_cycles;
/* Scale down results to 1000 operations to support lower
* granularity clocks.
@@ -44,7 +46,7 @@
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -81,8 +83,8 @@
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
/* Make the thread offline */
rte_rcu_qsbr_thread_offline(t[0], thread_id);
@@ -113,8 +115,8 @@
} while (loop_cnt < 20000000);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -130,15 +132,15 @@
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
printf("\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\n",
num_cores - 1);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
if (all_registered == 1)
tmp_num_cores = num_cores - 1;
@@ -168,15 +170,16 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -193,10 +196,10 @@
size_t sz;
unsigned int i, tmp_num_cores;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf Test: %d Readers\n", num_cores);
@@ -220,11 +223,11 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -241,10 +244,10 @@
size_t sz;
unsigned int i;
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: %d Writers ('wait' in qsbr_check == false)\n",
num_cores);
@@ -266,10 +269,11 @@
/* Wait until all readers have exited */
rte_eal_mp_wait_lcore();
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -317,8 +321,8 @@
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
rte_rcu_qsbr_thread_unregister(temp, thread_id);
@@ -389,12 +393,12 @@ static struct rte_hash *init_hash(void)
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\n", num_cores);
@@ -453,8 +457,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
@@ -467,12 +471,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -511,7 +515,7 @@ static struct rte_hash *init_hash(void)
printf("Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\n", num_cores);
- __atomic_store_n(&thr_id, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_relaxed);
if (all_registered == 1)
tmp_num_cores = num_cores;
@@ -570,8 +574,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
/* Wait and check return value from reader threads */
@@ -583,12 +587,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -622,10 +626,10 @@ static struct rte_hash *init_hash(void)
return TEST_SKIPPED;
}
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
num_cores = 0;
RTE_LCORE_FOREACH_WORKER(core_id) {
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index d7c5a4c..6d7a0a8 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -186,7 +186,7 @@ struct thread_params {
void *burst = NULL;
#ifdef RTE_USE_C11_MEM_MODEL
- if (__atomic_fetch_add(&lcore_count, 1, __ATOMIC_RELAXED) + 1 != 2)
+ if (rte_atomic_fetch_add_explicit(&lcore_count, 1, rte_memory_order_relaxed) + 1 != 2)
#else
if (__sync_add_and_fetch(&lcore_count, 1) != 2)
#endif
@@ -320,7 +320,7 @@ struct thread_params {
return 0;
}
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static uint64_t queue_count[RTE_MAX_LCORE];
#define TIME_MS 100
@@ -342,7 +342,8 @@ struct thread_params {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (time_diff < hz * TIME_MS / 1000) {
@@ -397,12 +398,12 @@ struct thread_params {
param.r = r;
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lcore_f(&param);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_ring_stress_impl.h b/app/test/test_ring_stress_impl.h
index 2dec897..e6b23c0 100644
--- a/app/test/test_ring_stress_impl.h
+++ b/app/test/test_ring_stress_impl.h
@@ -24,7 +24,7 @@ enum {
WRK_CMD_RUN,
};
-static uint32_t wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;
+static RTE_ATOMIC(uint32_t) wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;
/* test run-time in seconds */
static const uint32_t run_time = 60;
@@ -203,7 +203,7 @@ struct ring_elem {
* really releasing any data through 'wrk_cmd' to
* the worker.
*/
- while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) != WRK_CMD_RUN)
+ while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) != WRK_CMD_RUN)
rte_pause();
cl = rte_rdtsc_precise();
@@ -246,7 +246,7 @@ struct ring_elem {
lcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);
- } while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) == WRK_CMD_RUN);
+ } while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) == WRK_CMD_RUN);
cl = rte_rdtsc_precise() - cl;
if (prcs == 0)
@@ -360,12 +360,12 @@ struct ring_elem {
}
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_RUN, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_RUN, rte_memory_order_release);
rte_delay_us(run_time * US_PER_S);
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_STOP, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_STOP, rte_memory_order_release);
/* wait for workers and collect stats. */
mc = rte_lcore_id();
diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index 5079895..f67fc35 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -35,7 +35,7 @@
static rte_rwlock_t sl;
static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
enum {
LC_TYPE_RDLOCK,
@@ -101,7 +101,8 @@ struct try_rwlock_lcore {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -134,12 +135,12 @@ struct try_rwlock_lcore {
printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(NULL);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c
index 873bd60..7455bac 100644
--- a/app/test/test_seqlock.c
+++ b/app/test/test_seqlock.c
@@ -22,7 +22,7 @@ struct data {
struct reader {
struct data *data;
- uint8_t stop;
+ RTE_ATOMIC(uint8_t) stop;
};
#define WRITER_RUNTIME 2.0 /* s */
@@ -79,7 +79,7 @@ struct reader {
struct reader *r = arg;
int rc = TEST_SUCCESS;
- while (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&
+ while (rte_atomic_load_explicit(&r->stop, rte_memory_order_relaxed) == 0 &&
rc == TEST_SUCCESS) {
struct data *data = r->data;
bool interrupted;
@@ -115,7 +115,7 @@ struct reader {
static void
reader_stop(struct reader *reader)
{
- __atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&reader->stop, 1, rte_memory_order_relaxed);
}
#define NUM_WRITERS 2 /* main lcore + one worker */
diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index c12d52d..010ab82 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -59,15 +59,15 @@ static int32_t dummy_mt_unsafe_cb(void *args)
* test, because two threads are concurrently in a non-MT safe callback.
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so clear pass flag */
*pass_test = 0;
@@ -86,15 +86,15 @@ static int32_t dummy_mt_safe_cb(void *args)
* that 2 threads are running the callback at the same time: MT safe
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so set pass flag */
*pass_test = 1;
@@ -748,15 +748,15 @@ static int32_t dummy_mt_safe_cb(void *args)
/* retrieve done flag and lock to add/sub */
uint32_t *done = &params[0];
- uint32_t *lock = &params[1];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&params[1];
while (!*done) {
- __atomic_fetch_add(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(lock, 1, rte_memory_order_relaxed);
rte_delay_us(500);
- if (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)
+ if (rte_atomic_load_explicit(lock, rte_memory_order_relaxed) > 1)
/* pass: second core has simultaneously incremented */
*done = 1;
- __atomic_fetch_sub(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(lock, 1, rte_memory_order_relaxed);
}
return 0;
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 9a481f2..a29405a 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -48,7 +48,7 @@
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_spinlock_per_core(__rte_unused void *arg)
@@ -110,7 +110,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -149,11 +150,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c
index c5e1caa..3f17a26 100644
--- a/app/test/test_stack_perf.c
+++ b/app/test/test_stack_perf.c
@@ -23,7 +23,7 @@
*/
static volatile unsigned int bulk_sizes[] = {8, MAX_BURST};
-static uint32_t lcore_barrier;
+static RTE_ATOMIC(uint32_t) lcore_barrier;
struct lcore_pair {
unsigned int c1;
@@ -143,8 +143,8 @@ struct thread_args {
s = args->s;
size = args->sz;
- __atomic_fetch_sub(&lcore_barrier, 1, __ATOMIC_RELAXED);
- rte_wait_until_equal_32(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&lcore_barrier, 1, rte_memory_order_relaxed);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&lcore_barrier, 0, rte_memory_order_relaxed);
uint64_t start = rte_rdtsc();
@@ -173,7 +173,7 @@ struct thread_args {
unsigned int i;
for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
- __atomic_store_n(&lcore_barrier, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 2, rte_memory_order_relaxed);
args[0].sz = args[1].sz = bulk_sizes[i];
args[0].s = args[1].s = s;
@@ -206,7 +206,7 @@ struct thread_args {
int cnt = 0;
double avg;
- __atomic_store_n(&lcore_barrier, n, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, n, rte_memory_order_relaxed);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
@@ -300,7 +300,7 @@ struct thread_args {
struct lcore_pair cores;
struct rte_stack *s;
- __atomic_store_n(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 0, rte_memory_order_relaxed);
s = rte_stack_create(STACK_NAME, STACK_SIZE, rte_socket_id(), flags);
if (s == NULL) {
diff --git a/app/test/test_threads.c b/app/test/test_threads.c
index 4ac3f26..6d6881a 100644
--- a/app/test/test_threads.c
+++ b/app/test/test_threads.c
@@ -6,12 +6,13 @@
#include <rte_thread.h>
#include <rte_debug.h>
+#include <rte_stdatomic.h>
#include "test.h"
RTE_LOG_REGISTER(threads_logtype_test, test.threads, INFO);
-static uint32_t thread_id_ready;
+static RTE_ATOMIC(uint32_t) thread_id_ready;
static uint32_t
thread_main(void *arg)
@@ -19,9 +20,9 @@
if (arg != NULL)
*(rte_thread_t *)arg = rte_thread_self();
- __atomic_store_n(&thread_id_ready, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 1, rte_memory_order_release);
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 1)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 1)
;
return 0;
@@ -37,13 +38,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
@@ -61,13 +62,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main,
&thread_main_id) == 0, "Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_detach(thread_id) == 0,
"Failed to detach thread.");
@@ -85,7 +86,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
priority = RTE_THREAD_PRIORITY_NORMAL;
@@ -121,7 +122,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Priority set mismatches priority get");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -137,7 +138,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset0) == 0,
@@ -190,7 +191,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes affinity thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset1) == 0,
@@ -198,7 +199,7 @@
RTE_TEST_ASSERT(memcmp(&cpuset0, &cpuset1, sizeof(rte_cpuset_t)) == 0,
"Failed to apply affinity attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -219,7 +220,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes priority thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_priority(thread_id, &priority) == 0,
@@ -227,7 +228,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Failed to apply priority attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -243,13 +244,13 @@
thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 1fbbedb..9b6b584 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -48,7 +48,7 @@
static rte_ticketlock_recursive_t tlr;
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_ticketlock_per_core(__rte_unused void *arg)
@@ -111,7 +111,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcore_count[lcore] < MAX_LOOP) {
@@ -153,11 +154,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index cac8fc0..dc15a80 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -202,7 +202,7 @@ struct mytimerinfo {
/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
-static uint16_t lcore_state[RTE_MAX_LCORE];
+static RTE_ATOMIC(uint16_t) lcore_state[RTE_MAX_LCORE];
static void
main_init_workers(void)
@@ -210,7 +210,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_WAITING, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_WAITING,
+ rte_memory_order_relaxed);
}
}
@@ -220,10 +221,12 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_RUN_SIGNAL, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL,
+ rte_memory_order_release);
}
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_RUNNING,
+ rte_memory_order_acquire);
}
}
@@ -233,7 +236,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_FINISHED,
+ rte_memory_order_acquire);
}
}
@@ -242,8 +246,10 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- rte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, __ATOMIC_ACQUIRE);
- __atomic_store_n(&lcore_state[lcore_id], WORKER_RUNNING, __ATOMIC_RELEASE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[lcore_id], WORKER_RUN_SIGNAL,
+ rte_memory_order_acquire);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING,
+ rte_memory_order_release);
}
static void
@@ -251,7 +257,8 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- __atomic_store_n(&lcore_state[lcore_id], WORKER_FINISHED, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED,
+ rte_memory_order_release);
}
@@ -277,12 +284,12 @@ struct mytimerinfo {
unsigned int lcore_id = rte_lcore_id();
unsigned int main_lcore = rte_get_main_lcore();
int32_t my_collisions = 0;
- static uint32_t collisions;
+ static RTE_ATOMIC(uint32_t) collisions;
if (lcore_id == main_lcore) {
cb_count = 0;
test_failed = 0;
- __atomic_store_n(&collisions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&collisions, 0, rte_memory_order_relaxed);
timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
if (timers == NULL) {
printf("Test Failed\n");
@@ -310,7 +317,7 @@ struct mytimerinfo {
my_collisions++;
}
if (my_collisions != 0)
- __atomic_fetch_add(&collisions, my_collisions, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&collisions, my_collisions, rte_memory_order_relaxed);
/* wait long enough for timers to expire */
rte_delay_ms(100);
@@ -324,7 +331,7 @@ struct mytimerinfo {
/* now check that we get the right number of callbacks */
if (lcore_id == main_lcore) {
- my_collisions = __atomic_load_n(&collisions, __ATOMIC_RELAXED);
+ my_collisions = rte_atomic_load_explicit(&collisions, rte_memory_order_relaxed);
if (my_collisions != 0)
printf("- %d timer reset collisions (OK)\n", my_collisions);
rte_timer_manage();
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH 43/46] app/test-eventdev: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (41 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 42/46] app/test: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 44/46] app/test-crypto-perf: " Tyler Retzlaff
` (8 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
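For reference, the before/after shape of the conversion applied across these files, shown on an illustrative counter rather than anything taken from the eventdev tests (assumes only a DPDK build that provides <rte_stdatomic.h>):

#include <stdint.h>
#include <rte_stdatomic.h>

/* before:
 *     static uint64_t outstanding;
 *     __atomic_fetch_sub(&outstanding, 1, __ATOMIC_RELAXED);
 * after: the object is annotated with RTE_ATOMIC() and accessed only
 * through the rte_atomic_xxx_explicit() macros.
 */
static RTE_ATOMIC(uint64_t) outstanding;

static inline void
consume_one(void)
{
	/* relaxed ordering is enough for a statistics-style counter */
	rte_atomic_fetch_sub_explicit(&outstanding, 1, rte_memory_order_relaxed);
}

static inline int
all_consumed(void)
{
	return rte_atomic_load_explicit(&outstanding,
			rte_memory_order_relaxed) == 0;
}

The macros expand to C11 atomics when stdatomic is enabled and to the existing compiler builtins otherwise, which is what keeps the replacement behaviour-preserving.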
app/test-eventdev/test_order_atq.c | 4 ++--
app/test-eventdev/test_order_common.c | 5 +++--
app/test-eventdev/test_order_common.h | 8 ++++----
app/test-eventdev/test_order_queue.c | 4 ++--
app/test-eventdev/test_perf_common.h | 6 +++---
5 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 2fee4b4..128d3f2 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index a9894c6..0fceace 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -189,7 +189,7 @@
evt_err("failed to allocate t->expected_flow_seq memory");
goto exp_nomem;
}
- __atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts, rte_memory_order_relaxed);
t->err = false;
t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
@@ -296,7 +296,8 @@
while (t->err == false) {
uint64_t new_cycles = rte_get_timer_cycles();
- int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
+ int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
+ rte_memory_order_relaxed);
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 1507265..65878d1 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -48,7 +48,7 @@ struct test_order {
* The atomic_* is an expensive operation,Since it is a functional test,
* We are using the atomic_ operation to reduce the code complexity.
*/
- uint64_t outstand_pkts;
+ RTE_ATOMIC(uint64_t) outstand_pkts;
enum evt_test_result result;
uint32_t nb_flows;
uint64_t nb_pkts;
@@ -95,7 +95,7 @@ struct test_order {
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
- uint64_t *const outstand_pkts)
+ RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
/* compare the seqn against expected value */
@@ -113,7 +113,7 @@ struct test_order {
*/
expected_flow_seq[flow]++;
rte_pktmbuf_free(ev->mbuf);
- __atomic_fetch_sub(outstand_pkts, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -132,7 +132,7 @@ struct test_order {
const uint8_t port = w->port_id;\
const uint32_t nb_flows = t->nb_flows;\
uint32_t *expected_flow_seq = t->expected_flow_seq;\
- uint64_t *outstand_pkts = &t->outstand_pkts;\
+ RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
if (opt->verbose_level > 1)\
printf("%s(): lcore %d dev_id %d port=%d\n",\
__func__, rte_lcore_id(), dev_id, port)
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 80eaea5..a282ab2 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 2b4f572..7f7c823 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -225,7 +225,7 @@ struct perf_elt {
* stored before updating the number of
* processed packets for worker lcores
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -270,7 +270,7 @@ struct perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -325,7 +325,7 @@ struct perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts += vec->nb_elem;
if (enable_fwd_latency) {
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH 44/46] app/test-crypto-perf: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (42 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 43/46] app/test-eventdev: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 45/46] app/test-compress-perf: " Tyler Retzlaff
` (7 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
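All four crypto-perf files repeat the same print-the-header-once idiom; a standalone sketch of that idiom with the new API (header text shortened, flag name illustrative):

#include <stdio.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) display_once;

static void
print_header_once(void)
{
	uint16_t exp = 0;

	/* only the first lcore to flip 0 -> 1 prints; relaxed ordering is
	 * enough, the compare-exchange itself provides the atomicity */
	if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed))
		printf("# lcore id, Buf Size, Burst Size\n");
}

Note that the builtin's separate 'weak' argument disappears: the strong/weak choice is now carried by the function name.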
app/test-crypto-perf/cperf_test_latency.c | 6 +++---
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +++++-----
app/test-crypto-perf/cperf_test_throughput.c | 10 +++++-----
app/test-crypto-perf/cperf_test_verify.c | 10 +++++-----
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 99b7d7c..b8ad6bf 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -136,7 +136,7 @@ struct priv_op_data {
uint32_t imix_idx = 0;
int ret = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
if (ctx == NULL)
return 0;
@@ -341,8 +341,8 @@ struct priv_op_data {
uint16_t exp = 0;
if (ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
"cycles, time (us)");
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 4a60f6d..7191d99 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -396,7 +396,7 @@ struct pmd_cyclecount_state {
state.lcore = rte_lcore_id();
state.linearize = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
static bool warmup = true;
/*
@@ -443,8 +443,8 @@ struct pmd_cyclecount_state {
uint16_t exp = 0;
if (!opts->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
@@ -460,8 +460,8 @@ struct pmd_cyclecount_state {
state.cycles_per_enq,
state.cycles_per_deq);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index e3d266d..c0891e7 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -107,7 +107,7 @@ struct cperf_throughput_ctx {
uint8_t burst_size_idx = 0;
uint32_t imix_idx = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
@@ -277,8 +277,8 @@ struct cperf_throughput_ctx {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst Size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -298,8 +298,8 @@ struct cperf_throughput_ctx {
throughput_gbps,
cycles_per_packet);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 3548509..222c7a1 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -216,7 +216,7 @@ struct cperf_op_result {
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
uint64_t ops_failed = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint64_t i;
uint16_t ops_unused = 0;
@@ -370,8 +370,8 @@ struct cperf_op_result {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -388,8 +388,8 @@ struct cperf_op_result {
ops_deqd_failed,
ops_failed);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore id, Buffer Size(B), "
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Failed Ops\n");
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH 45/46] app/test-compress-perf: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (43 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 44/46] app/test-crypto-perf: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-20 20:51 ` [PATCH 46/46] app/test-bbdev: " Tyler Retzlaff
` (6 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
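Besides the local display_once flags, this patch also annotates a struct member; a trimmed sketch of that shape, reduced to the relevant field (struct name shortened from cperf_mem_resources):

#include <stdint.h>
#include <rte_stdatomic.h>

struct mem_resources {
	uint16_t qp_id;
	/* set by whichever worker reaches the print first */
	RTE_ATOMIC(uint16_t) print_info_once;
};

static inline int
first_to_print(struct mem_resources *mem)
{
	uint16_t exp = 0;

	return rte_atomic_compare_exchange_strong_explicit(
			&mem->print_info_once, &exp, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed);
}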
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 ++--
app/test-compress-perf/comp_perf_test_throughput.c | 10 +++++-----
app/test-compress-perf/comp_perf_test_verify.c | 6 +++---
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/app/test-compress-perf/comp_perf_test_common.h b/app/test-compress-perf/comp_perf_test_common.h
index d039e5a..085e269 100644
--- a/app/test-compress-perf/comp_perf_test_common.h
+++ b/app/test-compress-perf/comp_perf_test_common.h
@@ -14,7 +14,7 @@ struct cperf_mem_resources {
uint16_t qp_id;
uint8_t lcore_id;
- uint16_t print_info_once;
+ RTE_ATOMIC(uint16_t) print_info_once;
uint32_t total_bufs;
uint8_t *compressed_data;
diff --git a/app/test-compress-perf/comp_perf_test_cyclecount.c b/app/test-compress-perf/comp_perf_test_cyclecount.c
index 4d336ec..64e8faa 100644
--- a/app/test-compress-perf/comp_perf_test_cyclecount.c
+++ b/app/test-compress-perf/comp_perf_test_cyclecount.c
@@ -498,8 +498,8 @@ struct cperf_cyclecount_ctx {
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
diff --git a/app/test-compress-perf/comp_perf_test_throughput.c b/app/test-compress-perf/comp_perf_test_throughput.c
index 1f7072d..089d19c 100644
--- a/app/test-compress-perf/comp_perf_test_throughput.c
+++ b/app/test-compress-perf/comp_perf_test_throughput.c
@@ -336,7 +336,7 @@
struct cperf_benchmark_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->ver.options;
uint32_t lcore = rte_lcore_id();
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
int i, ret = EXIT_SUCCESS;
ctx->ver.mem.lcore_id = lcore;
@@ -345,8 +345,8 @@
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
@@ -413,8 +413,8 @@
}
exp = 0;
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("\n%12s%6s%12s%17s%15s%16s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]",
"Comp [Gbps]", "Decomp [Gbps]");
diff --git a/app/test-compress-perf/comp_perf_test_verify.c b/app/test-compress-perf/comp_perf_test_verify.c
index 7bd1807..09d97c5 100644
--- a/app/test-compress-perf/comp_perf_test_verify.c
+++ b/app/test-compress-perf/comp_perf_test_verify.c
@@ -396,7 +396,7 @@
struct cperf_verify_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->options;
int ret = EXIT_SUCCESS;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint32_t lcore = rte_lcore_id();
uint16_t exp = 0;
@@ -452,8 +452,8 @@
test_data->input_data_sz * 100;
if (!ctx->silent) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("%12s%6s%12s%17s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]");
}
--
1.8.3.1
* [PATCH 46/46] app/test-bbdev: use rte stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (44 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 45/46] app/test-compress-perf: " Tyler Retzlaff
@ 2024-03-20 20:51 ` Tyler Retzlaff
2024-03-21 15:33 ` [PATCH 00/46] use " Stephen Hemminger
` (5 subsequent siblings)
51 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-20 20:51 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++++++++++++----------------
1 file changed, 110 insertions(+), 73 deletions(-)
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index dcce00a..9694ed3 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -144,7 +144,7 @@ struct test_op_params {
uint16_t num_to_process;
uint16_t num_lcores;
int vector_mask;
- uint16_t sync;
+ RTE_ATOMIC(uint16_t) sync;
struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
@@ -159,9 +159,9 @@ struct thread_params {
uint8_t iter_count;
double iter_average;
double bler;
- uint16_t nb_dequeued;
- int16_t processing_status;
- uint16_t burst_sz;
+ RTE_ATOMIC(uint16_t) nb_dequeued;
+ RTE_ATOMIC(int16_t) processing_status;
+ RTE_ATOMIC(uint16_t) burst_sz;
struct test_op_params *op_params;
struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
@@ -3195,56 +3195,64 @@ typedef int (test_case_function)(struct active_device *ad,
}
if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
printf(
"Dequeue interrupt handler called for incorrect event!\n");
return;
}
- burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);
+ burst_sz = rte_atomic_load_explicit(&tp->burst_sz, rte_memory_order_relaxed);
num_ops = tp->op_params->num_to_process;
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
deq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,
&tp->fft_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
deq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
&tp->mldts_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else /*RTE_BBDEV_OP_TURBO_ENC*/
deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
if (deq < burst_sz) {
printf(
"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
burst_sz, deq);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
- if (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) + deq < num_ops) {
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
return;
}
@@ -3288,7 +3296,8 @@ typedef int (test_case_function)(struct active_device *ad,
if (ret) {
printf("Buffers validation failed\n");
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
}
switch (test_vector.op_type) {
@@ -3315,7 +3324,8 @@ typedef int (test_case_function)(struct active_device *ad,
break;
default:
printf("Unknown op type: %d\n", test_vector.op_type);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
@@ -3324,7 +3334,7 @@ typedef int (test_case_function)(struct active_device *ad,
tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
((double)total_time / (double)rte_get_tsc_hz());
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
}
static int
@@ -3362,10 +3372,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3415,15 +3426,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3459,10 +3472,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3506,15 +3520,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3549,10 +3565,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3592,15 +3609,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3636,10 +3655,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3681,15 +3701,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3725,10 +3747,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3769,15 +3792,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3811,10 +3836,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_to_process);
@@ -3851,15 +3877,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3894,7 +3922,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4013,7 +4042,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4148,7 +4178,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4271,7 +4302,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4402,7 +4434,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4503,7 +4536,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4604,7 +4638,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4702,7 +4737,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4898,7 +4934,7 @@ typedef int (test_case_function)(struct active_device *ad,
else
return TEST_SKIPPED;
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -4921,7 +4957,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = bler_function(&t_params[0]);
/* Main core is always used */
@@ -5024,7 +5060,7 @@ typedef int (test_case_function)(struct active_device *ad,
throughput_function = throughput_pmd_lcore_enc;
}
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -5047,7 +5083,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = throughput_function(&t_params[0]);
/* Main core is always used */
@@ -5077,29 +5113,30 @@ typedef int (test_case_function)(struct active_device *ad,
* Wait for main lcore operations.
*/
tp = &t_params[0];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+ (rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed) !=
TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed);
/* Wait for worker lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp = &t_params[used_cores];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
- TEST_FAILED))
+ (rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed) != TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed);
}
/* Print throughput if test passed */
--
1.8.3.1
* Re: [PATCH 00/46] use stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (45 preceding siblings ...)
2024-03-20 20:51 ` [PATCH 46/46] app/test-bbdev: " Tyler Retzlaff
@ 2024-03-21 15:33 ` Stephen Hemminger
2024-03-21 16:22 ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (4 subsequent siblings)
51 siblings, 1 reply; 300+ messages in thread
From: Stephen Hemminger @ 2024-03-21 15:33 UTC (permalink / raw)
To: Tyler Retzlaff
Cc: dev, Mattias Rönnblom, Morten Brørup,
Abdullah Sevincer, Ajit Khaparde, Alok Prasad, Anatoly Burakov,
Andrew Rybchenko, Anoob Joseph, Bruce Richardson, Byron Marohn,
Chenbo Xia, Chengwen Feng, Ciara Loftus, Ciara Power,
Dariusz Sosnowski, David Hunt, Devendra Singh Rawat,
Erik Gabriel Carrillo, Guoyang Zhou, Harman Kalra,
Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li,
Maciej Czekaj, Matan Azrad, Maxime Coquelin, Nicolas Chautru,
Ori Kam, Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy,
Reshma Pattan, Rosen Xu, Ruifeng Wang, Rushil Gupta,
Sameh Gobriel, Sivaprasad Tummala, Somnath Kotur, Suanming Mou,
Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang,
Yipeng Wang, Yisen Zhuang, Yuying Zhang, Ziyang Xuan
On Wed, 20 Mar 2024 13:50:46 -0700
Tyler Retzlaff <roretzla@linux.microsoft.com> wrote:
> This series converts all non-generic built atomics to use the rte_atomic
> macros that allow optional enablement of standard C11 atomics.
>
> Use of generic atomics for non-scalar types are not converted in this
> change and will be evaluated as a part of a separate series.
>
> Note if this series ends up requiring too much rebasing due to tree
> churn before it is merged i will break it up into smaller series.
Maybe a coccinelle script to automate this and check/fix future usages?
Series-acked-by: Stephen Hemminger <stephen@networkplumber.org>
* Re: [PATCH 00/46] use stdatomic API
2024-03-21 15:33 ` [PATCH 00/46] use " Stephen Hemminger
@ 2024-03-21 16:22 ` Tyler Retzlaff
0 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 16:22 UTC (permalink / raw)
To: Stephen Hemminger
Cc: dev, Mattias Rönnblom, Morten Brørup,
Abdullah Sevincer, Ajit Khaparde, Alok Prasad, Anatoly Burakov,
Andrew Rybchenko, Anoob Joseph, Bruce Richardson, Byron Marohn,
Chenbo Xia, Chengwen Feng, Ciara Loftus, Ciara Power,
Dariusz Sosnowski, David Hunt, Devendra Singh Rawat,
Erik Gabriel Carrillo, Guoyang Zhou, Harman Kalra,
Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li,
Maciej Czekaj, Matan Azrad, Maxime Coquelin, Nicolas Chautru,
Ori Kam, Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy,
Reshma Pattan, Rosen Xu, Ruifeng Wang, Rushil Gupta,
Sameh Gobriel, Sivaprasad Tummala, Somnath Kotur, Suanming Mou,
Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang,
Yipeng Wang, Yisen Zhuang, Yuying Zhang, Ziyang Xuan
On Thu, Mar 21, 2024 at 08:33:23AM -0700, Stephen Hemminger wrote:
> On Wed, 20 Mar 2024 13:50:46 -0700
> Tyler Retzlaff <roretzla@linux.microsoft.com> wrote:
>
> > This series converts all non-generic built atomics to use the rte_atomic
> > macros that allow optional enablement of standard C11 atomics.
> >
> > Use of generic atomics for non-scalar types are not converted in this
> > change and will be evaluated as a part of a separate series.
> >
> > Note if this series ends up requiring too much rebasing due to tree
> > churn before it is merged i will break it up into smaller series.
>
> Maybe a coccinelle script to automate this and check/fix future usages?
It also isn't strictly a 1:1 replacement, so while some __atomic_ uses
could be text replaced, sometimes you need to evaluate the arguments
(e.g. on compare exchange) to pick the right macro, and sometimes there
are generic vs scalar versions to deal with.
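A minimal sketch of the kind of call site that needs that inspection;
the variable names are hypothetical, only the macro mapping follows the
pattern used in this series:
#include <rte_stdatomic.h>
static uint32_t refcnt_old;               /* before conversion */
static RTE_ATOMIC(uint32_t) refcnt_new;   /* after conversion */
static void
cmpxchg_sketch(void)
{
	uint32_t expected = 0;
	/* builtin form: the 4th argument (weak = false) selects the
	 * strong flavor at the call site */
	__atomic_compare_exchange_n(&refcnt_old, &expected, 1, false,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	expected = 0;
	/* rte stdatomic form: the weak/strong choice moves into the
	 * macro name, so each call site has to be read rather than
	 * text substituted */
	rte_atomic_compare_exchange_strong_explicit(&refcnt_new, &expected, 1,
				    rte_memory_order_relaxed, rte_memory_order_relaxed);
}
A call that passed weak = true would instead need the corresponding weak
variant of the macro, which is why plain text substitution is not enough.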
We already have a checkpatches check, but right now it is (I think) being
ignored when merging changes to drivers that have not been converted. So
once things are converted there shouldn't be any further reason to
accept series that are being flagged by checkpatches.
>
> Series-acked-by: Stephen Hemminger <stephen@networkplumber.org>
* [PATCH v2 00/45] use stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (46 preceding siblings ...)
2024-03-21 15:33 ` [PATCH 00/46] use " Stephen Hemminger
@ 2024-03-21 19:16 ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 01/45] net/mlx5: use rte " Tyler Retzlaff
` (44 more replies)
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (3 subsequent siblings)
51 siblings, 45 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
This series converts all non-generic builtin atomics to use the rte_atomic
macros that allow optional enablement of standard C11 atomics.
Use of generic atomics for non-scalar types is not converted in this
change and will be evaluated as part of a separate series.
Note: if this series ends up requiring too much rebasing due to tree
churn before it is merged, I will break it up into smaller series.
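A minimal before/after sketch of the conversion pattern applied across
the series (the counter field below is hypothetical, not taken from any
driver):
#include <rte_stdatomic.h>
/* before: plain scalar updated with the gcc builtin */
static uint64_t pkts_old;
static inline void
count_old(void)
{
	__atomic_fetch_add(&pkts_old, 1, __ATOMIC_RELAXED);
}
/* after: the field is marked with RTE_ATOMIC() and updated with the
 * corresponding optional stdatomic wrapper */
static RTE_ATOMIC(uint64_t) pkts_new;
static inline void
count_new(void)
{
	rte_atomic_fetch_add_explicit(&pkts_new, 1, rte_memory_order_relaxed);
}
When the optional C11 mode is not enabled, the rte_atomic_* macros are
expected to expand back to the equivalent gcc builtins, so the
conversion should be behavior-preserving.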
v2:
* drop the net/sfc driver from the series. The sfc driver uses the
generic __atomic_store, which is not handled by the current macros.
The cases where generic __atomic_xxx are used on objects that
can't be accepted by __atomic_xxx_n will be addressed in a
separate series.
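A minimal sketch of that distinction, assuming only the documented gcc
builtin signatures (the struct and variable names below are made up,
not the actual sfc code):
#include <stdint.h>
struct big_state { uint64_t a, b; };   /* non-scalar, illustration only */
static struct big_state shared_state;
static uint64_t shared_counter;
static void
store_sketch(struct big_state *src)
{
	/* scalar "_n" form: covered by the rte_atomic_*_explicit macros */
	__atomic_store_n(&shared_counter, 0, __ATOMIC_RELAXED);
	/* generic form: stores a whole object through pointers, so it has
	 * no direct scalar rte_atomic_* replacement */
	__atomic_store(&shared_state, src, __ATOMIC_RELAXED);
}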
Tyler Retzlaff (45):
net/mlx5: use rte stdatomic API
net/ixgbe: use rte stdatomic API
net/iavf: use rte stdatomic API
net/ice: use rte stdatomic API
net/i40e: use rte stdatomic API
net/hns3: use rte stdatomic API
net/bnxt: use rte stdatomic API
net/cpfl: use rte stdatomic API
net/af_xdp: use rte stdatomic API
net/octeon_ep: use rte stdatomic API
net/octeontx: use rte stdatomic API
net/cxgbe: use rte stdatomic API
net/gve: use rte stdatomic API
net/memif: use rte stdatomic API
net/thunderx: use rte stdatomic API
net/virtio: use rte stdatomic API
net/hinic: use rte stdatomic API
net/idpf: use rte stdatomic API
net/qede: use rte stdatomic API
net/ring: use rte stdatomic API
vdpa/mlx5: use rte stdatomic API
raw/ifpga: use rte stdatomic API
event/opdl: use rte stdatomic API
event/octeontx: use rte stdatomic API
event/dsw: use rte stdatomic API
dma/skeleton: use rte stdatomic API
crypto/octeontx: use rte stdatomic API
common/mlx5: use rte stdatomic API
common/idpf: use rte stdatomic API
common/iavf: use rte stdatomic API
baseband/acc: use rte stdatomic API
net/txgbe: use rte stdatomic API
net/null: use rte stdatomic API
event/dlb2: use rte stdatomic API
dma/idxd: use rte stdatomic API
crypto/ccp: use rte stdatomic API
common/cpt: use rte stdatomic API
bus/vmbus: use rte stdatomic API
examples: use rte stdatomic API
app/dumpcap: use rte stdatomic API
app/test: use rte stdatomic API
app/test-eventdev: use rte stdatomic API
app/test-crypto-perf: use rte stdatomic API
app/test-compress-perf: use rte stdatomic API
app/test-bbdev: use rte stdatomic API
app/dumpcap/main.c | 12 +-
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++--------
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 +-
app/test-compress-perf/comp_perf_test_throughput.c | 10 +-
app/test-compress-perf/comp_perf_test_verify.c | 6 +-
app/test-crypto-perf/cperf_test_latency.c | 6 +-
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +-
app/test-crypto-perf/cperf_test_throughput.c | 10 +-
app/test-crypto-perf/cperf_test_verify.c | 10 +-
app/test-eventdev/test_order_atq.c | 4 +-
app/test-eventdev/test_order_common.c | 5 +-
app/test-eventdev/test_order_common.h | 8 +-
app/test-eventdev/test_order_queue.c | 4 +-
app/test-eventdev/test_perf_common.h | 6 +-
app/test/test_bpf.c | 46 ++++--
app/test/test_distributor.c | 114 ++++++-------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++--
app/test/test_hash_multiwriter.c | 16 +-
app/test/test_hash_readwrite.c | 74 ++++-----
app/test/test_hash_readwrite_lf_perf.c | 88 +++++-----
app/test/test_lcores.c | 25 +--
app/test/test_lpm_perf.c | 14 +-
app/test/test_mcslock.c | 12 +-
app/test/test_mempool_perf.c | 9 +-
app/test/test_pflock.c | 13 +-
app/test/test_pmd_perf.c | 10 +-
app/test/test_rcu_qsbr_perf.c | 114 ++++++-------
app/test/test_ring_perf.c | 11 +-
app/test/test_ring_stress_impl.h | 10 +-
app/test/test_rwlock.c | 9 +-
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +--
app/test/test_spinlock.c | 9 +-
app/test/test_stack_perf.c | 12 +-
app/test/test_threads.c | 33 ++--
app/test/test_ticketlock.c | 9 +-
app/test/test_timer.c | 31 ++--
drivers/baseband/acc/rte_acc100_pmd.c | 36 ++--
drivers/baseband/acc/rte_vrb_pmd.c | 46 ++++--
drivers/bus/vmbus/vmbus_channel.c | 9 +-
drivers/common/cpt/cpt_common.h | 2 +-
drivers/common/iavf/iavf_impl.c | 4 +-
drivers/common/idpf/idpf_common_device.h | 6 +-
drivers/common/idpf/idpf_common_rxtx.c | 14 +-
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 +-
drivers/common/mlx5/linux/mlx5_nl.c | 5 +-
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 +-
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 ++--
drivers/common/mlx5/mlx5_common_utils.h | 6 +-
drivers/common/mlx5/mlx5_malloc.c | 58 +++----
drivers/crypto/ccp/ccp_dev.c | 8 +-
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 +-
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +-
drivers/dma/skeleton/skeleton_dmadev.c | 5 +-
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
drivers/event/dlb2/dlb2.c | 34 ++--
drivers/event/dlb2/dlb2_priv.h | 10 +-
drivers/event/dlb2/dlb2_xstats.c | 2 +-
drivers/event/dsw/dsw_evdev.h | 6 +-
drivers/event/dsw/dsw_event.c | 34 ++--
drivers/event/dsw/dsw_xstats.c | 4 +-
drivers/event/octeontx/timvf_evdev.h | 8 +-
drivers/event/octeontx/timvf_worker.h | 36 ++--
drivers/event/opdl/opdl_ring.c | 80 ++++-----
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 ++-
drivers/net/bnxt/bnxt_cpr.h | 4 +-
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 +-
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 +-
drivers/net/cpfl/cpfl_ethdev.c | 8 +-
drivers/net/cxgbe/clip_tbl.c | 12 +-
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 +--
drivers/net/cxgbe/cxgbe_ofld.h | 6 +-
drivers/net/cxgbe/l2t.c | 12 +-
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +--
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 +-
drivers/net/cxgbe/smt.h | 2 +-
drivers/net/gve/base/gve_osdep.h | 4 +-
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
drivers/net/hns3/hns3_cmd.c | 18 +-
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 ++--
drivers/net/hns3/hns3_ethdev.h | 32 ++--
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++----
drivers/net/hns3/hns3_intr.c | 36 ++--
drivers/net/hns3/hns3_intr.h | 4 +-
drivers/net/hns3/hns3_mbx.c | 6 +-
drivers/net/hns3/hns3_mp.c | 6 +-
drivers/net/hns3/hns3_rxtx.c | 10 +-
drivers/net/hns3/hns3_tm.c | 4 +-
drivers/net/i40e/i40e_ethdev.c | 4 +-
drivers/net/i40e/i40e_rxtx.c | 6 +-
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf.h | 16 +-
drivers/net/iavf/iavf_rxtx.c | 4 +-
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +-
drivers/net/ice/base/ice_osdep.h | 4 +-
drivers/net/ice/ice_dcf.c | 6 +-
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 +-
drivers/net/ice/ice_dcf_parent.c | 16 +-
drivers/net/ice/ice_ethdev.c | 12 +-
drivers/net/ice/ice_ethdev.h | 2 +-
drivers/net/idpf/idpf_ethdev.c | 7 +-
drivers/net/ixgbe/ixgbe_ethdev.c | 14 +-
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 +-
drivers/net/memif/memif.h | 4 +-
drivers/net/memif/rte_eth_memif.c | 50 +++---
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 +-
drivers/net/mlx5/mlx5.c | 9 +-
drivers/net/mlx5/mlx5.h | 66 ++++----
drivers/net/mlx5/mlx5_flow.c | 37 +++--
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 +++--
drivers/net/mlx5/mlx5_flow_dv.c | 126 +++++++-------
drivers/net/mlx5/mlx5_flow_flex.c | 14 +-
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++----
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++--
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++--
drivers/net/mlx5/mlx5_hws_cnt.c | 71 ++++----
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +-
drivers/net/mlx5/mlx5_rx.h | 14 +-
drivers/net/mlx5/mlx5_rxq.c | 30 ++--
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 +-
drivers/net/mlx5/mlx5_txpp.c | 84 +++++-----
drivers/net/mlx5/mlx5_txq.c | 12 +-
drivers/net/mlx5/mlx5_utils.c | 10 +-
drivers/net/mlx5/mlx5_utils.h | 4 +-
drivers/net/null/rte_eth_null.c | 12 +-
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +-
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +-
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx2_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx_ep_common.h | 4 +-
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 +-
drivers/net/octeontx/octeontx_ethdev.c | 8 +-
drivers/net/qede/base/bcm_osal.c | 6 +-
drivers/net/ring/rte_eth_ring.c | 8 +-
drivers/net/thunderx/nicvf_rxtx.c | 9 +-
drivers/net/thunderx/nicvf_struct.h | 4 +-
drivers/net/txgbe/txgbe_ethdev.c | 12 +-
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
drivers/net/virtio/virtio_ring.h | 4 +-
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 +-
drivers/net/virtio/virtqueue.h | 32 ++--
drivers/raw/ifpga/ifpga_rawdev.c | 9 +-
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +--
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +-
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++---
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 +-
examples/bbdev_app/main.c | 13 +-
examples/l2fwd-event/l2fwd_common.h | 4 +-
examples/l2fwd-event/l2fwd_event.c | 24 +--
examples/l2fwd-jobstats/main.c | 11 +-
.../client_server_mp/mp_server/main.c | 6 +-
examples/server_node_efd/efd_server/main.c | 6 +-
examples/vhost/main.c | 32 ++--
examples/vhost/main.h | 4 +-
examples/vhost/virtio_net.c | 13 +-
examples/vhost_blk/vhost_blk.c | 8 +-
examples/vm_power_manager/channel_monitor.c | 9 +-
179 files changed, 1628 insertions(+), 1496 deletions(-)
--
1.8.3.1
* [PATCH v2 01/45] net/mlx5: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
@ 2024-03-21 19:16 ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 02/45] net/ixgbe: " Tyler Retzlaff
` (43 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 ++-
drivers/net/mlx5/mlx5.c | 9 ++-
drivers/net/mlx5/mlx5.h | 66 ++++++++---------
drivers/net/mlx5/mlx5_flow.c | 37 +++++-----
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 ++++++-----
drivers/net/mlx5/mlx5_flow_dv.c | 126 ++++++++++++++++----------------
drivers/net/mlx5/mlx5_flow_flex.c | 14 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++++++++-------
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++++----
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++++----
drivers/net/mlx5/mlx5_hws_cnt.c | 71 +++++++++---------
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +--
drivers/net/mlx5/mlx5_rx.h | 14 ++--
drivers/net/mlx5/mlx5_rxq.c | 30 ++++----
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 ++---
drivers/net/mlx5/mlx5_txpp.c | 84 ++++++++++-----------
drivers/net/mlx5/mlx5_txq.c | 12 +--
drivers/net/mlx5/mlx5_utils.c | 10 +--
drivers/net/mlx5/mlx5_utils.h | 4 +-
22 files changed, 351 insertions(+), 345 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 40ea9d2..70bba6c 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -1918,9 +1918,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
return -ENOTSUP;
}
/* Check there is no concurrent mapping in other thread. */
- if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
- base, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
+ base,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
return 0;
}
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index b54f3cc..63da8f4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1117,7 +1117,7 @@
return 0;
}
/* Only need to check refcnt, 0 after "sh" is allocated. */
- if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+ if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
priv->lb_used = 1;
return 0;
@@ -1163,7 +1163,7 @@
claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
sh->self_lb.ibv_cq = NULL;
}
- __atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
return -rte_errno;
#else
RTE_SET_USED(dev);
@@ -1186,8 +1186,9 @@
if (!priv->lb_used)
return;
- MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
- if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
+ MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
+ if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
if (sh->self_lb.qp) {
claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
sh->self_lb.qp = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d1a6382..2ff94db 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -855,8 +855,8 @@
ct_pool = mng->pools[idx];
for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
ct = &ct_pool->actions[i];
- val = __atomic_fetch_sub(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
MLX5_ASSERT(val == 1);
if (val > 1)
cnt++;
@@ -1082,7 +1082,8 @@
DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
return -ENOTSUP;
}
- if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
+ if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
+ rte_memory_order_relaxed) + 1 > 1)
return 0;
priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
@@ -1173,7 +1174,7 @@
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
- if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
return;
mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
mlx5_free(fp->flex.devx_fp);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0091a24..77c84b8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -378,7 +378,7 @@ struct mlx5_drop {
struct mlx5_lb_ctx {
struct ibv_qp *qp; /* QP object. */
void *ibv_cq; /* Completion queue. */
- uint16_t refcnt; /* Reference count for representors. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
/* HW steering queue job descriptor type. */
@@ -481,10 +481,10 @@ enum mlx5_counter_type {
/* Counter age parameter. */
struct mlx5_age_param {
- uint16_t state; /**< Age state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
uint16_t port_id; /**< Port id of the counter. */
uint32_t timeout:24; /**< Aging timeout in seconds. */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/**< Time in seconds since last hit (atomically accessed). */
void *context; /**< Flow counter age context. */
};
@@ -497,7 +497,7 @@ struct flow_counter_stats {
/* Shared counters information for counters. */
struct mlx5_flow_counter_shared {
union {
- uint32_t refcnt; /* Only for shared action management. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
uint32_t id; /* User counter ID for legacy sharing. */
};
};
@@ -588,7 +588,7 @@ struct mlx5_counter_stats_raw {
/* Counter global management structure. */
struct mlx5_flow_counter_mng {
- volatile uint16_t n_valid; /* Number of valid pools. */
+ volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
uint16_t last_pool_idx; /* Last used pool index */
int min_id; /* The minimum counter ID in the pools. */
int max_id; /* The maximum counter ID in the pools. */
@@ -654,7 +654,7 @@ struct mlx5_aso_sq {
struct mlx5_aso_age_action {
LIST_ENTRY(mlx5_aso_age_action) next;
void *dr_action;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
/* Following fields relevant only when action is active. */
uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
struct mlx5_age_param age_params;
@@ -688,7 +688,7 @@ struct mlx5_geneve_tlv_option_resource {
rte_be16_t option_class; /* geneve tlv opt class.*/
uint8_t option_type; /* geneve tlv opt type.*/
uint8_t length; /* geneve tlv opt length. */
- uint32_t refcnt; /* geneve tlv object reference counter */
+ RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
};
@@ -903,7 +903,7 @@ struct mlx5_flow_meter_policy {
uint16_t group;
/* The group. */
rte_spinlock_t sl;
- uint32_t ref_cnt;
+ RTE_ATOMIC(uint32_t) ref_cnt;
/* Use count. */
struct rte_flow_pattern_template *hws_item_templ;
/* Hardware steering item templates. */
@@ -1038,7 +1038,7 @@ struct mlx5_flow_meter_profile {
struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
/**< srtcm_rfc2697 struct. */
};
- uint32_t ref_cnt; /**< Use count. */
+ RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
uint32_t g_support:1; /**< If G color will be generated. */
uint32_t y_support:1; /**< If Y color will be generated. */
uint32_t initialized:1; /**< Initialized. */
@@ -1078,7 +1078,7 @@ struct mlx5_aso_mtr {
enum mlx5_aso_mtr_type type;
struct mlx5_flow_meter_info fm;
/**< Pointer to the next aso flow meter structure. */
- uint8_t state; /**< ASO flow meter state. */
+ RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
uint32_t offset;
enum rte_color init_color;
};
@@ -1124,7 +1124,7 @@ struct mlx5_flow_mtr_mng {
/* Default policy table. */
uint32_t def_policy_id;
/* Default policy id. */
- uint32_t def_policy_ref_cnt;
+ RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
/** def_policy meter use count. */
struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
/* Meter drop table. */
@@ -1197,8 +1197,8 @@ struct mlx5_txpp_wq {
/* Tx packet pacing internal timestamp. */
struct mlx5_txpp_ts {
- uint64_t ci_ts;
- uint64_t ts;
+ RTE_ATOMIC(uint64_t) ci_ts;
+ RTE_ATOMIC(uint64_t) ts;
};
/* Tx packet pacing structure. */
@@ -1221,12 +1221,12 @@ struct mlx5_dev_txpp {
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
/* Statistics counters. */
- uint64_t err_miss_int; /* Missed service interrupt. */
- uint64_t err_rearm_queue; /* Rearm Queue errors. */
- uint64_t err_clock_queue; /* Clock Queue errors. */
- uint64_t err_ts_past; /* Timestamp in the past. */
- uint64_t err_ts_future; /* Timestamp in the distant future. */
- uint64_t err_ts_order; /* Timestamp not in ascending order. */
+ RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
+ RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
+ RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
+ RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
+ RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
+ RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
};
/* Sample ID information of eCPRI flex parser structure. */
@@ -1287,16 +1287,16 @@ struct mlx5_aso_ct_action {
void *dr_action_orig;
/* General action object for reply dir. */
void *dr_action_rply;
- uint32_t refcnt; /* Action used count in device flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */
uint16_t peer; /* The only peer port index could also use this CT. */
- enum mlx5_aso_ct_state state; /* ASO CT state. */
+ RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
bool is_original; /* The direction of the DR action to be used. */
};
/* CT action object state update. */
#define MLX5_ASO_CT_UPDATE_STATE(c, s) \
- __atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+ rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -1370,7 +1370,7 @@ struct mlx5_flex_pattern_field {
/* Port flex item context. */
struct mlx5_flex_item {
struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
- uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
uint32_t mapnum; /* Number of pattern translation entries. */
struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
@@ -1383,7 +1383,7 @@ struct mlx5_flex_item {
#define MLX5_SRV6_SAMPLE_NUM 5
/* Mlx5 internal flex parser profile structure. */
struct mlx5_internal_flex_parser_profile {
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
struct mlx5_flex_item flex; /* Hold map info for modify field. */
};
@@ -1512,9 +1512,9 @@ struct mlx5_dev_ctx_shared {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
#endif
- struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
- struct mlx5_hlist *modify_cmds;
- struct mlx5_hlist *tag_table;
+ RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
+ RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
+ RTE_ATOMIC(struct mlx5_hlist *) tag_table;
struct mlx5_list *port_id_action_list; /* Port ID action list. */
struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
struct mlx5_list *sample_action_list; /* List of sample actions. */
@@ -1525,7 +1525,7 @@ struct mlx5_dev_ctx_shared {
/* SW steering counters management structure. */
void *default_miss_action; /* Default miss action. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
- struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
+ RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
/* Shared interrupt handler section. */
struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
@@ -1570,7 +1570,7 @@ struct mlx5_dev_ctx_shared {
* Caution, secondary process may rebuild the struct during port start.
*/
struct mlx5_proc_priv {
- void *hca_bar;
+ RTE_ATOMIC(void *) hca_bar;
/* Mapped HCA PCI BAR area. */
size_t uar_table_sz;
/* Size of UAR register table. */
@@ -1635,7 +1635,7 @@ struct mlx5_rxq_obj {
/* Indirection table. */
struct mlx5_ind_table_obj {
LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
union {
void *ind_table; /**< Indirection table. */
struct mlx5_devx_obj *rqt; /* DevX RQT object. */
@@ -1826,7 +1826,7 @@ enum mlx5_quota_state {
};
struct mlx5_quota {
- uint8_t state; /* object state */
+ RTE_ATOMIC(uint8_t) state; /* object state */
uint8_t mode; /* metering mode */
/**
* Keep track of application update types.
@@ -1955,7 +1955,7 @@ struct mlx5_priv {
uint32_t flex_item_map; /* Map of allocated flex item elements. */
uint32_t nb_queue; /* HW steering queue number. */
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
- uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
+ RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -2007,7 +2007,7 @@ struct mlx5_priv {
#endif
struct rte_eth_dev *shared_host; /* Host device for HW steering. */
- uint16_t shared_refcnt; /* HW steering host reference counter. */
+ RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f31fdfb..1954975 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4623,8 +4623,8 @@ struct mlx5_translated_action_handle {
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
- __atomic_fetch_add(&shared_rss->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
+ rte_memory_order_relaxed);
return idx;
default:
break;
@@ -7459,7 +7459,7 @@ struct mlx5_list_entry *
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
- __atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
mlx5_free(default_miss_ctx.queue);
}
mlx5_flow_pop_thread_workspace();
@@ -7470,10 +7470,10 @@ struct mlx5_list_entry *
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
if (rss_desc->shared_rss)
- __atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
+ rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
+ rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
@@ -7976,7 +7976,8 @@ struct rte_flow *
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
+ rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
flow_mreg_del_copy_action(dev, flow);
@@ -9456,7 +9457,7 @@ struct mlx5_flow_workspace*
{
uint32_t pools_n, us;
- pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
+ pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -9558,17 +9559,17 @@ struct mlx5_flow_workspace*
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
age_param = MLX5_CNT_TO_AGE(cnt);
- if (__atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) != AGE_CANDIDATE)
+ if (rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) != AGE_CANDIDATE)
continue;
if (cur->data[i].hits != prev->data[i].hits) {
- __atomic_store_n(&age_param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
continue;
}
- if (__atomic_fetch_add(&age_param->sec_since_last_hit,
+ if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
time_delta,
- __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
+ rte_memory_order_relaxed) + time_delta <= age_param->timeout)
continue;
/**
* Hold the lock first, or if between the
@@ -9579,10 +9580,10 @@ struct mlx5_flow_workspace*
priv = rte_eth_devices[age_param->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_TMOUT, false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_TMOUT,
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
}
@@ -11407,7 +11408,7 @@ struct tunnel_db_element_release_ctx {
{
struct tunnel_db_element_release_ctx *ctx = x;
ctx->ret = 0;
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
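
[Aside for reviewers, not part of the diff.] The refcount conversions above rely on both __atomic_fetch_sub() and rte_atomic_fetch_sub_explicit() returning the value held *before* the subtraction, which is why the "- 1" adjustment and the free-on-zero check carry over unchanged. A minimal C11 sketch of the same release pattern, with hypothetical names; the rte wrappers used in this patch are assumed to follow the same semantics as the C11 calls shown here:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct tunnel_obj {
        atomic_uint refcnt; /* plays the role of RTE_ATOMIC(uint32_t) refctn */
    };

    /* Returns true when the caller dropped the last reference and must free.
     * Relaxed ordering mirrors the driver's existing choice for this counter. */
    static bool
    tunnel_obj_release(struct tunnel_obj *t)
    {
        /* fetch_sub returns the pre-decrement value, hence the "- 1". */
        return (atomic_fetch_sub_explicit(&t->refcnt, 1,
                                          memory_order_relaxed) - 1) == 0;
    }
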
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 34b5e0f..edfa76f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1049,7 +1049,7 @@ struct mlx5_flow_tunnel {
LIST_ENTRY(mlx5_flow_tunnel) chain;
struct rte_flow_tunnel app_tunnel; /** app tunnel copy */
uint32_t tunnel_id; /** unique tunnel ID */
- uint32_t refctn;
+ RTE_ATOMIC(uint32_t) refctn;
struct rte_flow_action action;
struct rte_flow_item item;
struct mlx5_hlist *groups; /** tunnel groups */
@@ -1470,7 +1470,7 @@ struct rte_flow_pattern_template {
struct mlx5dr_match_template *mt; /* mlx5 match template. */
uint64_t item_flags; /* Item layer flags. */
uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
/*
* If true, then rule pattern should be prepended with
* represented_port pattern item.
@@ -1502,7 +1502,7 @@ struct rte_flow_actions_template {
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint16_t recom_off; /* Offset of DR IPv6 routing push remove action. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
uint8_t flex_item; /* flex item index. */
};
@@ -1855,7 +1855,7 @@ struct rte_flow_template_table {
/* Shared RSS action structure */
struct mlx5_shared_action_rss {
ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
- uint32_t refcnt; /**< Atomically accessed refcnt. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
struct mlx5_ind_table_obj *ind_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index ab9eb21..a94b228 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -619,7 +619,7 @@
uint8_t *u8addr;
uint8_t hit;
- if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
AGE_CANDIDATE)
continue;
byte = 63 - (j / 8);
@@ -627,13 +627,13 @@
u8addr = (uint8_t *)addr;
hit = (u8addr[byte] >> offset) & 0x1;
if (hit) {
- __atomic_store_n(&ap->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
} else {
struct mlx5_priv *priv;
- __atomic_fetch_add(&ap->sec_since_last_hit,
- diff, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
+ diff, rte_memory_order_relaxed);
/* If timeout passed add to aged-out list. */
if (ap->sec_since_last_hit <= ap->timeout)
continue;
@@ -641,12 +641,11 @@
rte_eth_devices[ap->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&ap->state,
+ if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
&expected,
AGE_TMOUT,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
LIST_INSERT_HEAD(&age_info->aged_aso,
act, next);
MLX5_AGE_SET(age_info,
@@ -946,10 +945,10 @@
for (i = 0; i < n; ++i) {
aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
MLX5_ASSERT(aso_mtr);
- verdict = __atomic_compare_exchange_n(&aso_mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
}
sq->tail += n;
@@ -1005,10 +1004,10 @@
mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,
MLX5_INDIRECT_ACTION_IDX_GET(job->action));
MLX5_ASSERT(mtr);
- verdict = __atomic_compare_exchange_n(&mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
}
@@ -1103,7 +1102,7 @@
struct mlx5_aso_sq *sq;
struct mlx5_dev_ctx_shared *sh = priv->sh;
uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
- uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+ uint8_t state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
poll_cq_t poll_mtr_cq =
is_tmpl_api ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
@@ -1112,7 +1111,7 @@
sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);
do {
poll_mtr_cq(priv, sq);
- if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
ASO_METER_READY)
return 0;
/* Waiting for CQE ready. */
@@ -1411,7 +1410,7 @@
uint16_t wqe_idx;
struct mlx5_aso_ct_pool *pool;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_FREE) {
DRV_LOG(ERR, "Fail: No context to query");
@@ -1620,12 +1619,12 @@
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
else
sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
/* Waiting for CQE ready, consider should block or sleep. */
@@ -1791,7 +1790,7 @@
bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (sh->config.dv_flow_en == 2)
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
@@ -1807,7 +1806,7 @@
}
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_READY ||
state == ASO_CONNTRACK_QUERY)
return 0;
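
[Aside for reviewers, not part of the diff.] The ASO meter/CT paths above poll the completion queue and re-read an atomic state field with relaxed ordering until it reaches READY. A simplified C11 sketch of that wait loop, with hypothetical names and without the CQE bookkeeping; assume the rte_atomic_load_explicit() calls in the patch behave like the C11 loads below:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    enum obj_state { OBJ_WAIT, OBJ_READY };

    /* Poll a completion handler until the object becomes READY or the
     * retry budget is exhausted; the state is only read with relaxed loads. */
    static bool
    wait_until_ready(_Atomic uint8_t *state, void (*poll)(void *), void *ctx,
                     unsigned int retries)
    {
        do {
            poll(ctx);
            if (atomic_load_explicit(state, memory_order_relaxed) == OBJ_READY)
                return true;
        } while (retries-- > 0);
        return false;
    }
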
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d434c67..f9c56af 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -313,7 +313,7 @@ enum mlx5_l3_tunnel_detection {
}
static inline struct mlx5_hlist *
-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
const char *name, uint32_t size, bool direct_key,
bool lcores_share, void *ctx,
mlx5_list_create_cb cb_create,
@@ -327,7 +327,7 @@ enum mlx5_l3_tunnel_detection {
struct mlx5_hlist *expected = NULL;
char s[MLX5_NAME_SIZE];
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
if (likely(hl))
return hl;
snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
@@ -341,11 +341,11 @@ enum mlx5_l3_tunnel_detection {
"cannot allocate resource memory");
return NULL;
}
- if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_hlist_destroy(hl);
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
}
return hl;
}
@@ -6139,8 +6139,8 @@ struct mlx5_list_entry *
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
- struct mlx5_indexed_pool *ipool = __atomic_load_n
- (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+ struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
+ (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
if (!ipool) {
struct mlx5_indexed_pool *expected = NULL;
@@ -6165,13 +6165,13 @@ struct mlx5_list_entry *
ipool = mlx5_ipool_create(&cfg);
if (!ipool)
return NULL;
- if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
- &expected, ipool, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
+ &expected, ipool,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_ipool_destroy(ipool);
- ipool = __atomic_load_n(&sh->mdh_ipools[index],
- __ATOMIC_SEQ_CST);
+ ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
+ rte_memory_order_seq_cst);
}
}
return ipool;
@@ -6992,9 +6992,9 @@ struct mlx5_list_entry *
age_info = GET_PORT_AGE_INFO(priv);
age_param = flow_dv_counter_idx_get_age(dev, counter);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since counter may still in process.
@@ -7002,7 +7002,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -7038,8 +7038,8 @@ struct mlx5_list_entry *
* indirect action API, shared info is 1 before the reduction,
* so this condition is failed and function doesn't return here.
*/
- if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
- __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
+ rte_memory_order_relaxed) - 1)
return;
}
cnt->pool = pool;
@@ -10203,8 +10203,8 @@ struct mlx5_list_entry *
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
} else {
ret = rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -10243,8 +10243,8 @@ struct mlx5_list_entry *
geneve_opt_resource->option_class = geneve_opt_v->option_class;
geneve_opt_resource->option_type = geneve_opt_v->option_type;
geneve_opt_resource->length = geneve_opt_v->option_len;
- __atomic_store_n(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
}
exit:
rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
@@ -12192,8 +12192,8 @@ struct mlx5_list_entry *
(void *)(uintptr_t)(dev_flow->flow_idx);
age_param->timeout = age->timeout;
age_param->port_id = dev->data->port_id;
- __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
return counter;
}
@@ -13241,9 +13241,9 @@ struct mlx5_list_entry *
uint16_t expected = AGE_CANDIDATE;
age_info = GET_PORT_AGE_INFO(priv);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since age action may still in process.
@@ -13251,7 +13251,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
LIST_REMOVE(age, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -13275,7 +13275,7 @@ struct mlx5_list_entry *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
- uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
flow_dv_aso_age_remove_from_age(dev, age);
@@ -13451,7 +13451,7 @@ struct mlx5_list_entry *
return 0; /* 0 is an error. */
}
}
- __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
return pool->index | ((age_free->offset + 1) << 16);
}
@@ -13481,10 +13481,10 @@ struct mlx5_list_entry *
aso_age->age_params.context = context;
aso_age->age_params.timeout = timeout;
aso_age->age_params.port_id = dev->data->port_id;
- __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
- __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
static void
@@ -13666,12 +13666,12 @@ struct mlx5_list_entry *
uint32_t ret;
struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
/* Cannot release when CT is in the ASO SQ. */
if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
return -1;
- ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -13861,7 +13861,7 @@ struct mlx5_list_entry *
pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
/* 0: inactive, 1: created, 2+: used by flows. */
- __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -14813,8 +14813,8 @@ struct mlx5_list_entry *
age_act = flow_aso_age_get_by_idx(dev, owner_idx);
if (flow->age == 0) {
flow->age = owner_idx;
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
+ rte_memory_order_relaxed);
}
age_act_pos = actions_n++;
action_flags |= MLX5_FLOW_ACTION_AGE;
@@ -14851,9 +14851,9 @@ struct mlx5_list_entry *
} else {
if (flow->counter == 0) {
flow->counter = owner_idx;
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&cnt_act->shared_info.refcnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
}
/* Save information first, will apply later. */
action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -15185,8 +15185,8 @@ struct mlx5_list_entry *
flow->indirect_type =
MLX5_INDIRECT_ACTION_TYPE_CT;
flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
}
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
@@ -15855,7 +15855,7 @@ struct mlx5_list_entry *
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
- __atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
}
void
@@ -16038,8 +16038,8 @@ struct mlx5_list_entry *
sh->geneve_tlv_option_resource;
rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
if (geneve_opt_resource) {
- if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED) - 1)) {
+ if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
claim_zero(mlx5_devx_cmd_destroy
(geneve_opt_resource->obj));
mlx5_free(sh->geneve_tlv_option_resource);
@@ -16448,7 +16448,7 @@ struct mlx5_list_entry *
/* Update queue with indirect table queue memory. */
origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
- __atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
@@ -16494,9 +16494,9 @@ struct mlx5_list_entry *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"invalid shared action");
- if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
- 0, 0, __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
+ 0, rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -16632,10 +16632,10 @@ struct rte_flow_action_handle *
return __flow_dv_action_rss_release(dev, idx, error);
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
- if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
- &no_flow_refcnt, 1, false,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
+ &no_flow_refcnt, 1,
+ rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -17595,13 +17595,13 @@ struct rte_flow_action_handle *
case MLX5_INDIRECT_ACTION_TYPE_AGE:
age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
resp = data;
- resp->aged = __atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) == AGE_TMOUT ?
+ resp->aged = rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) == AGE_TMOUT ?
1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_dv_query_count(dev, idx, data, error);
@@ -17678,12 +17678,12 @@ struct rte_flow_action_handle *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
}
- resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
AGE_TMOUT ? 1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
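
[Aside for reviewers, not part of the diff.] flow_dv_hlist_prepare() and flow_dv_modify_ipool_get() above use the same create-once pattern: load the slot, allocate if empty, publish with a compare-exchange, and on losing the race destroy the local copy and reload the winner's pointer. A stripped-down C11 sketch of that pattern with hypothetical res_create()/res_destroy() helpers:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct res { int dummy; }; /* stands in for struct mlx5_hlist, etc. */

    static struct res *res_create(void) { return calloc(1, sizeof(struct res)); }
    static void res_destroy(struct res *r) { free(r); }

    /* Create-once: the first caller publishes, losers reuse the winner's object. */
    static struct res *
    res_prepare(struct res *_Atomic *slot)
    {
        struct res *cur = atomic_load_explicit(slot, memory_order_seq_cst);
        struct res *expected = NULL;

        if (cur != NULL)
            return cur;
        cur = res_create();
        if (cur == NULL)
            return NULL;
        if (!atomic_compare_exchange_strong_explicit(slot, &expected, cur,
                                                     memory_order_seq_cst,
                                                     memory_order_seq_cst)) {
            /* Another thread won the race: drop ours, use theirs. */
            res_destroy(cur);
            cur = atomic_load_explicit(slot, memory_order_seq_cst);
        }
        return cur;
    }
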
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 4ae03a2..8a02247 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -86,7 +86,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map |= 1u << idx;
}
}
@@ -107,7 +107,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map &= ~(1u << idx);
rte_spinlock_unlock(&priv->flex_item_sl);
}
@@ -379,7 +379,7 @@
return ret;
}
if (acquire)
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return ret;
}
@@ -414,7 +414,7 @@
rte_errno = -EINVAL;
return -EINVAL;
}
- __atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
return 0;
}
@@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *
}
flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
/* Mark initialized flex item valid. */
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return (struct rte_flow_item_flex_handle *)flex;
error:
@@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
"invalid flex item handle value");
}
- if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
+ rte_memory_order_acquire, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&priv->flex_item_sl);
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 35f1ed7..7f8d234 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -715,7 +715,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
}
if (acts->mark)
- if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed) - 1))
flow_hw_rxq_flag_set(dev, false);
if (acts->jump) {
@@ -2298,7 +2299,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
goto err;
acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
- __atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
@@ -4537,8 +4539,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
uint8_t i;
for (i = 0; i < nb_action_templates; i++) {
- uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED);
+ uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (refcnt <= 1) {
rte_flow_error_set(error, EINVAL,
@@ -4576,8 +4578,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
while (i--) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_sub_fetch(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
return rte_errno;
}
@@ -4748,8 +4750,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
- ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED) + 1;
+ ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (ret <= 1) {
rte_errno = EINVAL;
goto it_error;
@@ -4800,14 +4802,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
for (i = 0; i < nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_fetch_sub(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
i = nb_item_templates;
it_error:
while (i--)
- __atomic_fetch_sub(&item_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
error:
err = rte_errno;
if (tbl) {
@@ -5039,12 +5041,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
LIST_REMOVE(table, next);
for (i = 0; i < table->nb_item_templates; i++)
- __atomic_fetch_sub(&table->its[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
+ 1, rte_memory_order_relaxed);
for (i = 0; i < table->nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
- __atomic_fetch_sub(&table->ats[i].action_template->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
+ 1, rte_memory_order_relaxed);
}
flow_hw_destroy_table_multi_pattern_ctx(table);
if (table->matcher_info[0].matcher)
@@ -7287,7 +7289,7 @@ enum mlx5_hw_indirect_list_relative_position {
if (!at->tmpl)
goto error;
at->action_flags = action_flags;
- __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
return at;
error:
@@ -7323,7 +7325,7 @@ enum mlx5_hw_indirect_list_relative_position {
uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Action template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -7897,7 +7899,7 @@ enum mlx5_hw_indirect_list_relative_position {
break;
}
}
- __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
rte_errno = pattern_template_validate(dev, &it, 1);
if (rte_errno)
goto error;
@@ -7933,7 +7935,7 @@ enum mlx5_hw_indirect_list_relative_position {
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Item template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -10513,7 +10515,8 @@ struct mlx5_list_entry *
}
dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
priv->shared_host = host_dev;
- __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
}
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
@@ -10698,7 +10701,8 @@ struct mlx5_list_entry *
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
if (priv->hw_q) {
@@ -10814,7 +10818,8 @@ struct mlx5_list_entry *
priv->hw_q = NULL;
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
mlx5_free(priv->hw_attr);
@@ -10872,8 +10877,8 @@ struct mlx5_list_entry *
NULL,
"Invalid CT destruction index");
}
- __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
+ rte_memory_order_relaxed);
mlx5_ipool_free(pool->cts, idx);
return 0;
}
@@ -11575,7 +11580,7 @@ struct mlx5_hw_q_job *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_REPORTED:
case HWS_AGE_AGED_OUT_NOT_REPORTED:
resp->aged = 1;
@@ -11595,8 +11600,8 @@ struct mlx5_hw_q_job *
}
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
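
[Aside for reviewers, not part of the diff.] The template refcount hunks above replace __atomic_add_fetch(), which returns the post-increment value, with rte_atomic_fetch_add_explicit() plus an explicit "+ 1", because the stdatomic-style API only offers fetch-then-add. A tiny C11 illustration of that equivalence (illustrative only):

    #include <assert.h>
    #include <stdatomic.h>

    int
    main(void)
    {
        atomic_uint refcnt = 1;
        /* __atomic_add_fetch() returned the value after the increment;
         * fetch_add returns the value before it, so "+ 1" keeps the
         * refcnt checks in the caller unchanged. */
        unsigned int new_val = atomic_fetch_add_explicit(&refcnt, 1,
                                       memory_order_relaxed) + 1;
        assert(new_val == 2u);
        return 0;
    }
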
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 4045c4c..f8eff60 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -2055,9 +2055,9 @@ struct mlx5_flow_meter_policy *
NULL, "Meter profile id not valid.");
/* Meter policy must exist. */
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
if (!priv->sh->config.dv_esw_en)
domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -2137,7 +2137,7 @@ struct mlx5_flow_meter_policy *
fm->is_enable = params->meter_enable;
fm->shared = !!shared;
fm->color_aware = !!params->use_prev_mtr_color;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
fm->def_policy = 1;
fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
@@ -2166,7 +2166,7 @@ struct mlx5_flow_meter_policy *
}
fm->active_state = params->meter_enable;
if (mtr_policy)
- __atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
error:
mlx5_flow_destroy_mtr_tbls(dev, fm);
@@ -2271,8 +2271,8 @@ struct mlx5_flow_meter_policy *
NULL, "Failed to create devx meter.");
}
fm->active_state = params->meter_enable;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
- __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
}
#endif
@@ -2295,7 +2295,7 @@ struct mlx5_flow_meter_policy *
if (fmp == NULL)
return -1;
/* Update dependencies. */
- __atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
fm->profile = NULL;
/* Remove from list. */
if (!priv->sh->meter_aso_en) {
@@ -2313,15 +2313,15 @@ struct mlx5_flow_meter_policy *
}
mlx5_flow_destroy_mtr_tbls(dev, fm);
if (fm->def_policy)
- __atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
+ 1, rte_memory_order_relaxed);
if (priv->sh->meter_aso_en) {
if (!fm->def_policy) {
mtr_policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
if (mtr_policy)
- __atomic_fetch_sub(&mtr_policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
fm->policy_id = 0;
}
fm->def_policy = 0;
@@ -2424,13 +2424,13 @@ struct mlx5_flow_meter_policy *
RTE_MTR_ERROR_TYPE_UNSPECIFIED,
NULL, "Meter object is being used.");
/* Destroy the meter profile. */
- __atomic_fetch_sub(&fm->profile->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
+ 1, rte_memory_order_relaxed);
/* Destroy the meter policy. */
policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
- __atomic_fetch_sub(&policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
index 14a2a8b..6ad0e8a 100644
--- a/drivers/net/mlx5/mlx5_flow_quota.c
+++ b/drivers/net/mlx5/mlx5_flow_quota.c
@@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
struct mlx5_quota *quota_obj =
sq->elts[(sq->tail + i) & mask].quota_obj;
- __atomic_compare_exchange_n(&quota_obj->state, &state,
- MLX5_QUOTA_STATE_READY, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
+ MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
}
}
@@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
rte_spinlock_lock(&sq->sqsl);
mlx5_quota_cmd_completion_handle(sq);
rte_spinlock_unlock(&sq->sqsl);
- if (__atomic_load_n(&quota_obj->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
MLX5_QUOTA_STATE_READY)
return 0;
} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
{
uint8_t state = MLX5_QUOTA_STATE_READY;
- bool verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ bool verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict)
return rte_flow_error_set(error, EBUSY,
@@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
async_job ? async_job : &sync_job, push, NULL);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
async_job ? async_job : &sync_job, push,
(void *)(uintptr_t)update->conf);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -593,9 +593,9 @@ struct rte_flow_action_handle *
NULL, "quota: failed to allocate quota object");
return NULL;
}
- verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: new quota object has invalid state");
@@ -616,8 +616,8 @@ struct rte_flow_action_handle *
(void *)(uintptr_t)conf);
if (ret) {
mlx5_ipool_free(qctx->quota_ipool, id);
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
+ rte_memory_order_relaxed);
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: WR failure");
return 0;
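
[Aside for reviewers, not part of the diff.] The quota hunks above gate each command on a READY-to-WAIT state transition done with a relaxed compare-exchange, and store the state back to READY (or FREE) when posting the WQE fails. A stripped-down C11 sketch of that claim/rollback pair, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    enum quota_state { Q_FREE, Q_READY, Q_WAIT };

    /* Claim the object for a new command only if it is currently READY. */
    static bool
    quota_try_claim(_Atomic uint8_t *state)
    {
        uint8_t expected = Q_READY;

        return atomic_compare_exchange_strong_explicit(state, &expected, Q_WAIT,
                                                       memory_order_relaxed,
                                                       memory_order_relaxed);
    }

    /* Roll back to READY when posting the command failed. */
    static void
    quota_unclaim(_Atomic uint8_t *state)
    {
        atomic_store_explicit(state, Q_READY, memory_order_relaxed);
    }
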
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index c31f2f3..1b625e0 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -149,7 +149,7 @@
}
if (param->timeout == 0)
continue;
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_NOT_REPORTED:
case HWS_AGE_AGED_OUT_REPORTED:
/* Already aged-out, no action is needed. */
@@ -171,8 +171,8 @@
hits = rte_be_to_cpu_64(stats[i].hits);
if (param->nb_cnts == 1) {
if (hits != param->accumulator_last_hits) {
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
param->accumulator_last_hits = hits;
continue;
}
@@ -184,8 +184,8 @@
param->accumulator_cnt = 0;
if (param->accumulator_last_hits !=
param->accumulator_hits) {
- __atomic_store_n(&param->sec_since_last_hit,
- 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit,
+ 0, rte_memory_order_relaxed);
param->accumulator_last_hits =
param->accumulator_hits;
param->accumulator_hits = 0;
@@ -193,9 +193,9 @@
}
param->accumulator_hits = 0;
}
- if (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,
- __ATOMIC_RELAXED) + time_delta <=
- __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
+ if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
+ rte_memory_order_relaxed) + time_delta <=
+ rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
continue;
/* Prepare the relevant ring for this AGE parameter */
if (priv->hws_strict_queue)
@@ -203,10 +203,10 @@
else
r = age_info->hw_age.aged_list;
/* Changing the state atomically and insert it into the ring. */
- if (__atomic_compare_exchange_n(&param->state, &expected1,
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
sizeof(uint32_t),
1, NULL);
@@ -221,11 +221,10 @@
*/
expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
if (ret == 0 &&
- !__atomic_compare_exchange_n(&param->state,
+ !rte_atomic_compare_exchange_strong_explicit(&param->state,
&expected2, expected1,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected2 == HWS_AGE_FREE)
mlx5_hws_age_param_free(priv,
param->own_cnt_index,
@@ -235,10 +234,10 @@
if (!priv->hws_strict_queue)
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
} else {
- __atomic_compare_exchange_n(&param->state, &expected2,
+ rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
}
}
/* The event is irrelevant in strict queue mode. */
@@ -796,8 +795,8 @@ struct mlx5_hws_cnt_pool *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
- switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
- __ATOMIC_RELAXED)) {
+ switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
+ rte_memory_order_relaxed)) {
case HWS_AGE_CANDIDATE:
case HWS_AGE_AGED_OUT_REPORTED:
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
@@ -862,8 +861,8 @@ struct mlx5_hws_cnt_pool *
"cannot allocate AGE parameter");
return 0;
}
- MLX5_ASSERT(__atomic_load_n(&param->state,
- __ATOMIC_RELAXED) == HWS_AGE_FREE);
+ MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
+ rte_memory_order_relaxed) == HWS_AGE_FREE);
if (shared) {
param->nb_cnts = 0;
param->accumulator_hits = 0;
@@ -914,9 +913,9 @@ struct mlx5_hws_cnt_pool *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
if (update_ade->timeout_valid) {
- uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
+ uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
update_ade->timeout,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (old_timeout == 0)
sec_since_last_hit_reset = true;
@@ -935,8 +934,8 @@ struct mlx5_hws_cnt_pool *
state_update = true;
}
if (sec_since_last_hit_reset)
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
if (state_update) {
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
@@ -945,13 +944,13 @@ struct mlx5_hws_cnt_pool *
* - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
* - AGED_OUT_REPORTED -> CANDIDATE
*/
- if (!__atomic_compare_exchange_n(&param->state, &expected,
+ if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
HWS_AGE_CANDIDATE_INSIDE_RING,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected == HWS_AGE_AGED_OUT_REPORTED)
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
return 0;
}
@@ -976,9 +975,9 @@ struct mlx5_hws_cnt_pool *
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
MLX5_ASSERT(param != NULL);
- if (__atomic_compare_exchange_n(&param->state, &expected,
- HWS_AGE_AGED_OUT_REPORTED, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
+ HWS_AGE_AGED_OUT_REPORTED,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
return param->context;
switch (expected) {
case HWS_AGE_FREE:
@@ -990,8 +989,8 @@ struct mlx5_hws_cnt_pool *
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
break;
case HWS_AGE_CANDIDATE_INSIDE_RING:
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
break;
case HWS_AGE_CANDIDATE:
/*
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index e005960..481442f 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -101,7 +101,7 @@ struct mlx5_hws_cnt_pool {
LIST_ENTRY(mlx5_hws_cnt_pool) next;
struct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;
struct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;
- uint32_t query_gen __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) query_gen __rte_cache_aligned;
struct mlx5_hws_cnt *pool;
struct mlx5_hws_cnt_raw_data_mng *raw_mng;
struct rte_ring *reuse_list;
@@ -134,10 +134,10 @@ enum {
/* HWS counter age parameter. */
struct mlx5_hws_age_param {
- uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/* Time in seconds since last hit (atomically accessed). */
- uint16_t state; /* AGE state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
uint64_t accumulator_last_hits;
/* Last total value of hits for comparing. */
uint64_t accumulator_hits;
@@ -426,7 +426,7 @@ struct mlx5_hws_age_param {
iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
hpool->pool[iidx].in_used = false;
hpool->pool[iidx].query_gen_when_free =
- __atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
qcache = hpool->cache->qcache[*queue];
if (unlikely(qcache == NULL)) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 2fce908..c627113 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {
/* RX queue private data. */
struct mlx5_rxq_priv {
uint16_t idx; /* Queue index. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -188,7 +188,7 @@ struct mlx5_rxq_priv {
/* External RX queue descriptor. */
struct mlx5_external_rxq {
uint32_t hw_id; /* Queue index in the Hardware. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
/* mlx5_rxq.c */
@@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
void *addr;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
MLX5_ASSERT(rep != NULL);
/* Replace MPRQ buf. */
(*rxq->mprq_bufs)[rq_idx] = rep;
@@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
void *buf_addr;
/* Increment the refcnt of the whole chunk. */
- __atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
- MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
- __ATOMIC_RELAXED) <= strd_n + 1);
+ rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
+ MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
+ rte_memory_order_relaxed) <= strd_n + 1);
buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
/*
* MLX5 device doesn't use iova but it is necessary in a
@@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
return false;
rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
- return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+ return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
}
#define LWM_COOKIE_RXQID_OFFSET 0
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dd51687..f67aaa6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -416,7 +416,7 @@
rte_errno = EINVAL;
return -rte_errno;
}
- return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1319,7 +1319,7 @@
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
for (j = 0; j != strd_n; ++j) {
shinfo = &buf->shinfos[j];
shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2037,7 +2037,7 @@ struct mlx5_rxq_priv *
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
if (rxq != NULL)
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2059,7 +2059,7 @@ struct mlx5_rxq_priv *
if (rxq == NULL)
return 0;
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2138,7 +2138,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2158,7 +2158,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2447,8 +2447,8 @@ struct mlx5_ind_table_obj *
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
== 0)) {
- __atomic_fetch_add(&ind_tbl->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
+ rte_memory_order_relaxed);
break;
}
}
@@ -2479,7 +2479,7 @@ struct mlx5_ind_table_obj *
unsigned int ret;
rte_rwlock_write_lock(&priv->ind_tbls_lock);
- ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
@@ -2561,7 +2561,7 @@ struct mlx5_ind_table_obj *
}
return ret;
}
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
return 0;
}
@@ -2626,7 +2626,7 @@ struct mlx5_ind_table_obj *
{
uint32_t refcnt;
- refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+ refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
if (refcnt <= 1)
return 0;
/*
@@ -3258,8 +3258,8 @@ struct mlx5_hrxq *
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
if (ext_rxq == NULL)
return -rte_errno;
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
if (ext_rxq->hw_id != hw_idx) {
DRV_LOG(ERR, "Port %u external RxQ index %u "
"is already mapped to HW index (requesting is "
@@ -3296,8 +3296,8 @@ struct mlx5_hrxq *
rte_errno = EINVAL;
return -rte_errno;
}
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
port_id, dpdk_idx);
rte_errno = EINVAL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f8d6728..c241a1d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1441,7 +1441,7 @@
rte_delay_us_sleep(1000 * priv->rxqs_n);
DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
if (priv->sh->config.dv_flow_en == 2) {
- if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
flow_hw_rxq_flag_set(dev, false);
} else {
mlx5_flow_stop_default(dev);
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index b1e8ea1..0e44df5 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -179,7 +179,7 @@ struct mlx5_txq_data {
__extension__
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
bool is_hairpin; /* Whether TxQ type is Hairpin. */
unsigned int max_inline_data; /* Max inline data. */
@@ -339,8 +339,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
* the service thread, data should be re-read.
*/
rte_compiler_barrier();
- ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
- ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
+ ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
+ ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
break;
@@ -350,8 +350,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts -= ts;
if (unlikely(mts >= UINT64_MAX / 2)) {
/* We have negative integer, mts is in the past. */
- __atomic_fetch_add(&sh->txpp.err_ts_past,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
+ 1, rte_memory_order_relaxed);
return -1;
}
tick = sh->txpp.tick;
@@ -360,8 +360,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts = (mts + tick - 1) / tick;
if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
/* We have mts is too distant future. */
- __atomic_fetch_add(&sh->txpp.err_ts_future,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
+ 1, rte_memory_order_relaxed);
return -1;
}
mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
@@ -1743,8 +1743,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
/* Convert the timestamp into completion to wait. */
ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
if (txq->ts_last && ts < txq->ts_last)
- __atomic_fetch_add(&txq->sh->txpp.err_ts_order,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
+ 1, rte_memory_order_relaxed);
txq->ts_last = ts;
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
sh = txq->sh;
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 5a5df2d..4e26fa2 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -538,12 +538,12 @@
uint64_t *ps;
rte_compiler_barrier();
- tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
- op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
+ tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
+ op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
rte_compiler_barrier();
- if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
+ if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
continue;
- if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
+ if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
continue;
ps = (uint64_t *)ts;
ps[0] = tm;
@@ -561,8 +561,8 @@
ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
rte_compiler_barrier();
- __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
rte_wmb();
}
@@ -590,8 +590,8 @@
*/
DRV_LOG(DEBUG,
"Clock Queue error sync lost (%X).", opcode);
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
return;
@@ -633,10 +633,10 @@
if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
return;
MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
- sh->txpp.ts.ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
- sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+ sh->txpp.ts.ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+ sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
sh->txpp.ts_p = 0;
if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -677,8 +677,8 @@
/* Check whether we have missed interrupts. */
if (cq_ci - wq->cq_ci != 1) {
DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
- __atomic_fetch_add(&sh->txpp.err_miss_int,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
+ 1, rte_memory_order_relaxed);
/* Check sync lost on wqe index. */
if (cq_ci - wq->cq_ci >=
(((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -693,8 +693,8 @@
/* Fire new requests to Rearm Queue. */
if (error) {
DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_rearm_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
}
@@ -987,8 +987,8 @@
mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
if (to.cts.op_own >> 4) {
DRV_LOG(DEBUG, "Clock Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
return -EIO;
}
@@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
return 0;
}
@@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
do {
uint64_t ts, ci;
- ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
- ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+ ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
+ ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ts,
- __ATOMIC_RELAXED) != ts)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
+ rte_memory_order_relaxed) != ts)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
- __ATOMIC_RELAXED) != ci)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
+ rte_memory_order_relaxed) != ci)
continue;
tsa->ts = ts;
tsa->ci_ts = ci;
@@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
for (i = 0; i < n_txpp; ++i)
stats[n_used + i].id = n_used + i;
stats[n_used + 0].value =
- __atomic_load_n(&sh->txpp.err_miss_int,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_miss_int,
+ rte_memory_order_relaxed);
stats[n_used + 1].value =
- __atomic_load_n(&sh->txpp.err_rearm_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
+ rte_memory_order_relaxed);
stats[n_used + 2].value =
- __atomic_load_n(&sh->txpp.err_clock_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
+ rte_memory_order_relaxed);
stats[n_used + 3].value =
- __atomic_load_n(&sh->txpp.err_ts_past,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_past,
+ rte_memory_order_relaxed);
stats[n_used + 4].value =
- __atomic_load_n(&sh->txpp.err_ts_future,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_future,
+ rte_memory_order_relaxed);
stats[n_used + 5].value =
- __atomic_load_n(&sh->txpp.err_ts_order,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_order,
+ rte_memory_order_relaxed);
stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
stats[n_used + 8].value = sh->txpp.sync_lost;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 14f55e8..da4236f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *
rte_errno = ENOMEM;
goto error;
}
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
tmpl->is_hairpin = false;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
@@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->is_hairpin = true;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
- __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
}
return ctrl;
}
@@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *
if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
return 0;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
+ if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
return 1;
if (txq_ctrl->obj) {
priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *
txq_free_elts(txq_ctrl);
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
- if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
if (!txq_ctrl->is_hairpin)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq_ctrl, next);
@@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *
if (!(*priv->txqs)[idx])
return -1;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
}
/**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index e28db2e..fc03cc0 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -203,7 +203,7 @@ struct mlx5_indexed_pool *
struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
lc = pool->cache[cidx]->lc;
- gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
+ gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
if (gc && lc != gc) {
mlx5_ipool_lock(pool);
if (lc && !(--lc->ref_cnt))
@@ -266,8 +266,8 @@ struct mlx5_indexed_pool *
pool->cache[cidx]->len = fetch_size - 1;
return pool->cache[cidx]->idx[pool->cache[cidx]->len];
}
- trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
- __ATOMIC_ACQUIRE) : 0;
+ trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
+ rte_memory_order_acquire) : 0;
trunk_n = lc ? lc->n_trunk : 0;
cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
/* Check if index reach maximum. */
@@ -332,11 +332,11 @@ struct mlx5_indexed_pool *
lc = p;
lc->ref_cnt = 1;
pool->cache[cidx]->lc = lc;
- __atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
}
/* Add trunk to trunks array. */
lc->trunks[trunk_idx] = trunk;
- __atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
/* Enqueue half of the index to global. */
ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
fetch_size = trunk->free >> 1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index f3c0d76..3146092 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {
struct mlx5_indexed_cache {
struct mlx5_indexed_trunk **trunks;
- volatile uint32_t n_trunk_valid; /* Trunks allocated. */
+ volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
uint32_t n_trunk; /* Trunk pointer array size. */
uint32_t ref_cnt;
uint32_t len;
@@ -266,7 +266,7 @@ struct mlx5_indexed_pool {
uint32_t free_list; /* Index to first free trunk. */
};
struct {
- struct mlx5_indexed_cache *gc;
+ RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
/* Global cache. */
struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
/* Local cache. */
--
1.8.3.1
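
For reference, the field annotations and call conversions above all follow one
mechanical pattern; the sketch below is a minimal illustration of it using
hypothetical names (struct shared_state, state_release(), state_get_gc()),
not code taken from the mlx5 driver:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct cache;   /* stand-in for a driver-private structure */

struct shared_state {
        RTE_ATOMIC(uint32_t) refcnt;    /* was: uint32_t refcnt; */
        RTE_ATOMIC(struct cache *) gc;  /* was: struct cache *gc; */
};

/* Drop one reference; return true when the last reference was released. */
static inline bool
state_release(struct shared_state *s)
{
        /* was: __atomic_fetch_sub(&s->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0 */
        return rte_atomic_fetch_sub_explicit(&s->refcnt, 1,
                                             rte_memory_order_relaxed) - 1 == 0;
}

/* Read the shared pointer without imposing any ordering. */
static inline struct cache *
state_get_gc(struct shared_state *s)
{
        /* was: __atomic_load_n(&s->gc, __ATOMIC_RELAXED) */
        return rte_atomic_load_explicit(&s->gc, rte_memory_order_relaxed);
}

The RTE_ATOMIC() annotation on the fields is what lets the
rte_atomic_*_explicit() calls type-check when DPDK is built with the standard
C11 atomics backend (the enable_stdatomic build option); with the legacy
builtin backend it reduces to the plain type.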
* [PATCH v2 02/45] net/ixgbe: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 01/45] net/mlx5: use rte " Tyler Retzlaff
@ 2024-03-21 19:16 ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 03/45] net/iavf: " Tyler Retzlaff
` (42 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 14 ++++++++------
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 ++--
3 files changed, 11 insertions(+), 9 deletions(-)
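
The link-thread flag below is converted from
__atomic_test_and_set()/__atomic_clear(), which have no one-to-one
rte_atomic_* names, to an exchange plus a store. A minimal sketch of the
resulting shape, with a hypothetical thread_running flag rather than the
adapter field:

#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(bool) thread_running;

/* Returns true when this caller set the flag (it was previously clear). */
static bool
try_start(void)
{
        /* was: !__atomic_test_and_set(&thread_running, __ATOMIC_SEQ_CST) */
        return !rte_atomic_exchange_explicit(&thread_running, true,
                                             rte_memory_order_seq_cst);
}

static void
finish(void)
{
        /* was: __atomic_clear(&thread_running, __ATOMIC_SEQ_CST) */
        rte_atomic_store_explicit(&thread_running, false,
                                  rte_memory_order_seq_cst);
}

The seq_cst store is a conservative drop-in for __atomic_clear(); the in-tree
"NOTE: review for potential ordering optimization" comments flag it for
possible relaxation later.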
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c61c52b..e63ae1a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1130,7 +1130,7 @@ struct rte_ixgbe_xstats_name_off {
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbe_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1638,7 +1638,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbevf_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
@@ -4203,7 +4203,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
/* NOTE: review for potential ordering optimization */
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -4240,7 +4240,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -4336,7 +4336,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
ixgbe_dev_wait_setup_link_complete(dev, 0);
/* NOTE: review for potential ordering optimization */
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the IXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -4348,7 +4349,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR,
"Create link thread failed!");
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 22fc3be..8ad841e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -511,7 +511,7 @@ struct ixgbe_adapter {
*/
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
- bool link_thread_running;
+ RTE_ATOMIC(bool) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f6c17d4..e7dfd6f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1831,7 +1831,7 @@
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
@@ -2114,7 +2114,7 @@
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
--
1.8.3.1
* [PATCH v2 03/45] net/iavf: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 02/45] net/ixgbe: " Tyler Retzlaff
@ 2024-03-21 19:16 ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 04/45] net/ice: " Tyler Retzlaff
` (41 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/iavf/iavf.h | 16 ++++++++--------
drivers/net/iavf/iavf_rxtx.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +++++++-------
4 files changed, 18 insertions(+), 18 deletions(-)
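
The pend_cmd hunks below switch from __atomic_compare_exchange() to
rte_atomic_compare_exchange_strong_explicit(); the notable signature change
is that the desired value is now passed by value instead of by address. A
minimal sketch with a hypothetical pend_cmd slot (plain int rather than
enum virtchnl_ops):

#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(int) pend_cmd;        /* 0 means no command pending */

/* Claim the slot only if no command is currently pending. */
static bool
claim_cmd(int ops)
{
        int expected = 0;

        /*
         * was: __atomic_compare_exchange(&pend_cmd, &expected, &ops, 0,
         *                                __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
         */
        return rte_atomic_compare_exchange_strong_explicit(&pend_cmd,
                        &expected, ops,
                        rte_memory_order_acquire, rte_memory_order_acquire);
}

On failure the observed value is written back into expected, exactly as with
the builtin.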
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 824ae4a..6b977e5 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -238,8 +238,8 @@ struct iavf_info {
struct virtchnl_vlan_caps vlan_v2_caps;
uint64_t supported_rxdid;
uint8_t *proto_xtr; /* proto xtr type for all queues */
- volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
- uint32_t pend_cmd_count;
+ volatile RTE_ATOMIC(enum virtchnl_ops) pend_cmd; /* pending command not finished */
+ RTE_ATOMIC(uint32_t) pend_cmd_count;
int cmd_retval; /* return value of the cmd response from PF */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
@@ -456,13 +456,13 @@ struct iavf_cmd_info {
_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 1, rte_memory_order_relaxed);
return !ret;
}
@@ -472,13 +472,13 @@ struct iavf_cmd_info {
_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 2, rte_memory_order_relaxed);
return !ret;
}
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 0a5246d..d1d4e9f 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2025,7 +2025,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
@@ -2152,7 +2152,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
index 83825aa..20b656e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_neon.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -273,7 +273,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1111d30..6d5969f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -41,7 +41,7 @@ struct iavf_event_element {
};
struct iavf_event_handler {
- uint32_t ndev;
+ RTE_ATOMIC(uint32_t) ndev;
rte_thread_t tid;
int fd[2];
pthread_mutex_t lock;
@@ -129,7 +129,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_add(&handler->ndev, 1, __ATOMIC_RELAXED) + 1 != 1)
+ if (rte_atomic_fetch_add_explicit(&handler->ndev, 1, rte_memory_order_relaxed) + 1 != 1)
return 0;
#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
@@ -137,7 +137,7 @@ struct iavf_event_handler {
int err = pipe(handler->fd);
#endif
if (err != 0) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -146,7 +146,7 @@ struct iavf_event_handler {
if (rte_thread_create_internal_control(&handler->tid, "iavf-event",
iavf_dev_event_handle, NULL)) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -158,7 +158,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed) - 1 != 0)
return;
int unused = pthread_cancel((pthread_t)handler->tid.opaque_id);
@@ -574,8 +574,8 @@ struct iavf_event_handler {
/* read message and it's expected one */
if (msg_opc == vf->pend_cmd) {
uint32_t cmd_count =
- __atomic_fetch_sub(&vf->pend_cmd_count,
- 1, __ATOMIC_RELAXED) - 1;
+ rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count,
+ 1, rte_memory_order_relaxed) - 1;
if (cmd_count == 0)
_notify_cmd(vf, msg_ret);
} else {
--
1.8.3.1
* [PATCH v2 04/45] net/ice: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (2 preceding siblings ...)
2024-03-21 19:16 ` [PATCH v2 03/45] net/iavf: " Tyler Retzlaff
@ 2024-03-21 19:16 ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 05/45] net/i40e: " Tyler Retzlaff
` (40 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/ice/base/ice_osdep.h | 4 ++--
drivers/net/ice/ice_dcf.c | 6 +++---
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 ++++----
drivers/net/ice/ice_dcf_parent.c | 16 ++++++++--------
drivers/net/ice/ice_ethdev.c | 12 ++++++------
drivers/net/ice/ice_ethdev.h | 2 +-
7 files changed, 25 insertions(+), 25 deletions(-)
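
The link-update hunks below leave struct rte_eth_link itself unannotated (it
is a public ethdev structure) and instead cast the 8-byte object to an
atomic-qualified pointer at the call site. A minimal sketch of that cast,
using a hypothetical 8-byte link_word union rather than rte_eth_link:

#include <stdint.h>
#include <rte_stdatomic.h>

/* Hypothetical 8-byte status word written as a single unit. */
union link_word {
        struct {
                uint32_t speed;
                uint16_t duplex;
                uint16_t status;
        };
        uint64_t raw;
};

static void
publish_link(union link_word *dst, const union link_word *src)
{
        uint64_t expected = dst->raw;

        /* Cast the plain object to an atomic-qualified pointer for the CAS. */
        (void)rte_atomic_compare_exchange_strong_explicit(
                (uint64_t __rte_atomic *)&dst->raw, &expected, src->raw,
                rte_memory_order_seq_cst, rte_memory_order_seq_cst);
}

This assumes the union is exactly 8 bytes, mirroring the driver's use of a
64-bit CAS over struct rte_eth_link.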
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 0e14b93..c17f1bf 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -235,7 +235,7 @@ struct ice_lock {
ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
struct ice_dma_mem *mem, u64 size)
{
- static uint64_t ice_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) ice_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -243,7 +243,7 @@ struct ice_lock {
return NULL;
snprintf(z_name, sizeof(z_name), "ice_dma_%" PRIu64,
- __atomic_fetch_add(&ice_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&ice_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
0, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7f8f516..204d4ea 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -764,7 +764,7 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
rte_spinlock_init(&hw->vc_cmd_queue_lock);
TAILQ_INIT(&hw->vc_cmd_queue);
- __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->vsi_update_thread_num, 0, rte_memory_order_relaxed);
hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
if (hw->arq_buf == NULL) {
@@ -888,8 +888,8 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
ice_dcf_dev_interrupt_handler, hw);
/* Wait for all `ice-thread` threads to exit. */
- while (__atomic_load_n(&hw->vsi_update_thread_num,
- __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(&hw->vsi_update_thread_num,
+ rte_memory_order_acquire) != 0)
rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
ice_dcf_mode_disable(hw);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index aa2a723..7726681 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -105,7 +105,7 @@ struct ice_dcf_hw {
void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen);
- int vsi_update_thread_num;
+ RTE_ATOMIC(int) vsi_update_thread_num;
uint8_t *arq_buf;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index d58ec9d..8f3a385 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -1743,7 +1743,7 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
{
return ad->hw.dcf_enabled &&
- !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+ !rte_atomic_load_explicit(&ad->dcf_state_on, rte_memory_order_relaxed);
}
/* Add UDP tunneling port */
@@ -1944,12 +1944,12 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
return -1;
}
- __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true, rte_memory_order_relaxed);
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 6e845f4..a478b69 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -123,8 +123,8 @@ struct ice_dcf_reset_event_param {
container_of(hw, struct ice_dcf_adapter, real_hw);
struct ice_adapter *parent_adapter = &adapter->parent;
- __atomic_fetch_add(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_relaxed);
rte_thread_detach(rte_thread_self());
@@ -133,8 +133,8 @@ struct ice_dcf_reset_event_param {
rte_spinlock_lock(&vsi_update_lock);
if (!ice_dcf_handle_vsi_update_event(hw)) {
- __atomic_store_n(&parent_adapter->dcf_state_on, true,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true,
+ rte_memory_order_relaxed);
ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
hw->num_vfs, hw->vf_vsi_map);
}
@@ -156,8 +156,8 @@ struct ice_dcf_reset_event_param {
free(param);
- __atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_release);
return 0;
}
@@ -269,8 +269,8 @@ struct ice_dcf_reset_event_param {
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
start_vsi_reset_thread(dcf_hw, true,
pf_msg->event_data.vf_vsi_map.vf_id);
break;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 87385d2..0f35c6a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4062,9 +4062,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = &dev->data->dev_link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
@@ -4078,9 +4078,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1a848b3..6cba643 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -621,7 +621,7 @@ struct ice_adapter {
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
/* True if DCF state of the associated PF is on */
- bool dcf_state_on;
+ RTE_ATOMIC(bool) dcf_state_on;
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
--
1.8.3.1
* [PATCH v2 05/45] net/i40e: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (3 preceding siblings ...)
2024-03-21 19:16 ` [PATCH v2 04/45] net/ice: " Tyler Retzlaff
@ 2024-03-21 19:16 ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 06/45] net/hns3: " Tyler Retzlaff
` (39 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/i40e/i40e_ethdev.c | 4 ++--
drivers/net/i40e/i40e_rxtx.c | 6 +++---
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
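
The i40e hunks below only touch the memory-order token passed to
rte_atomic_thread_fence(); the fence itself already existed. A minimal sketch
of why the acquire fence sits between the DD-bit check and the remaining
descriptor loads, with a hypothetical two-word rx_desc layout and DD bit
position:

#include <stdint.h>
#include <rte_atomic.h>

/* Hypothetical two-word RX descriptor polled by the driver. */
struct rx_desc {
        uint64_t qword0;
        uint64_t qword1;                /* carries the DD (done) bit */
};

#define RX_DESC_DD      (1ULL << 0)     /* illustrative bit position */

static int
desc_done(const volatile struct rx_desc *rxdp, struct rx_desc *out)
{
        if (!(rxdp->qword1 & RX_DESC_DD))
                return 0;
        /*
         * Order the DD-bit load before the loads of the other descriptor
         * words (was: rte_atomic_thread_fence(__ATOMIC_ACQUIRE)).
         */
        rte_atomic_thread_fence(rte_memory_order_acquire);
        out->qword0 = rxdp->qword0;
        out->qword1 = rxdp->qword1;
        return 1;
}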
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 380ce1a..801cc95 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -4687,7 +4687,7 @@ enum i40e_status_code
u64 size,
u32 alignment)
{
- static uint64_t i40e_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) i40e_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -4695,7 +4695,7 @@ enum i40e_status_code
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
- __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&i40e_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 5d25ab4..155f243 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -486,7 +486,7 @@
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
@@ -745,7 +745,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
@@ -867,7 +867,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index d873e30..3a99137 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -425,7 +425,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
--
1.8.3.1
* [PATCH v2 06/45] net/hns3: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (4 preceding siblings ...)
2024-03-21 19:16 ` [PATCH v2 05/45] net/i40e: " Tyler Retzlaff
@ 2024-03-21 19:16 ` Tyler Retzlaff
2024-03-21 19:16 ` [PATCH v2 07/45] net/bnxt: " Tyler Retzlaff
` (38 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/hns3/hns3_cmd.c | 18 ++++++------
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 +++++++++++------------
drivers/net/hns3/hns3_ethdev.h | 32 ++++++++++-----------
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++++++++++++++++++--------------------
drivers/net/hns3/hns3_intr.c | 36 +++++++++++------------
drivers/net/hns3/hns3_intr.h | 4 +--
drivers/net/hns3/hns3_mbx.c | 6 ++--
drivers/net/hns3/hns3_mp.c | 6 ++--
drivers/net/hns3/hns3_rxtx.c | 10 +++----
drivers/net/hns3/hns3_tm.c | 4 +--
11 files changed, 107 insertions(+), 107 deletions(-)
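
A good share of the hns3 churn below is in small bit helpers that operate on
RTE_ATOMIC(uint64_t) reset-level words. A minimal sketch of the converted
helpers, over a hypothetical file-scope reset_pending bitmap rather than
hw->reset.pending:

#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint64_t) reset_pending;      /* one bit per reset level */

static inline void
set_level(unsigned int nr)
{
        /* was: __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED) */
        rte_atomic_fetch_or_explicit(&reset_pending, 1UL << nr,
                                     rte_memory_order_relaxed);
}

static inline uint64_t
test_and_clear_level(unsigned int nr)
{
        uint64_t mask = 1UL << nr;

        /* was: __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask */
        return rte_atomic_fetch_and_explicit(&reset_pending, ~mask,
                                             rte_memory_order_relaxed) & mask;
}

This mirrors the hns3_atomic_set_bit()/hns3_test_and_clear_bit() helpers in
the hns3_ethdev.h hunk, where the RTE_ATOMIC() qualifier also has to
propagate to the uint64_t pointer parameters of hns3_handle_msix_error() and
related functions.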
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 001ff49..3c5fdbe 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,12 @@
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
uint64_t size, uint32_t alignment)
{
- static uint64_t hns3_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
- __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
@@ -198,8 +198,8 @@
hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
csq->next_to_use, csq->next_to_clean);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- __atomic_store_n(&hw->reset.disable_cmd, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+ rte_memory_order_relaxed);
hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}
@@ -313,7 +313,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (hns3_cmd_csq_done(hw))
return 0;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw,
"Don't wait for reply because of disable_cmd");
return -EBUSY;
@@ -360,7 +360,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
int retval;
uint32_t ntc;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -747,7 +747,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
ret = -EBUSY;
goto err_cmd_init;
}
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
ret = hns3_cmd_query_firmware_version_and_capability(hw);
if (ret) {
@@ -790,7 +790,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
return 0;
err_cmd_init:
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return ret;
}
@@ -819,7 +819,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (!hns->is_vf)
(void)hns3_firmware_compat_config(hw, false);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
/*
* A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 915e4eb..2f917fe 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,7 @@
* and configured directly to the hardware in the RESET_STAGE_RESTORE
* stage of the reset process.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a..327f6fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ struct hns3_intr_state {
};
static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
@@ -134,7 +134,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
hw->reset.stats.imp_cnt++;
@@ -148,7 +148,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
hw->reset.stats.global_cnt++;
@@ -1151,7 +1151,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* ensure that the hardware configuration remains unchanged before and
* after reset.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
}
@@ -1175,7 +1175,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* we will restore configurations to hardware in hns3_restore_vlan_table
* and hns3_restore_vlan_conf later.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
if (ret) {
hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5059,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5150,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5158,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
return ret;
hw->mac.link_status = RTE_ETH_LINK_DOWN;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -5184,7 +5184,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_tm_dev_stop_proc(hw);
hns3_config_mac_tnl_int(hw, false);
hns3_stop_tqps(hw);
@@ -5577,7 +5577,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
last_req = hns3_get_reset_level(hns, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -5677,7 +5677,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
}
static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
struct hns3_hw *hw = &hns->hw;
enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5737,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* any mailbox handling or command to firmware is only valid
* after hns3_cmd_init is called.
*/
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hw->reset.stats.request_cnt++;
break;
case HNS3_IMP_RESET:
@@ -5792,7 +5792,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* from table space. Hence, for function reset software intervention is
* required to delete the entries
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -5913,10 +5913,10 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5925,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Check if there is any ongoing reset in the hardware. This status can
@@ -6576,7 +6576,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e70c5ff..4c0f076 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -401,17 +401,17 @@ enum hns3_schedule {
struct hns3_reset_data {
enum hns3_reset_stage stage;
- uint16_t schedule;
+ RTE_ATOMIC(uint16_t) schedule;
/* Reset flag, covering the entire reset process */
- uint16_t resetting;
+ RTE_ATOMIC(uint16_t) resetting;
/* Used to disable sending cmds during reset */
- uint16_t disable_cmd;
+ RTE_ATOMIC(uint16_t) disable_cmd;
/* The reset level being processed */
enum hns3_reset_level level;
/* Reset level set, each bit represents a reset level */
- uint64_t pending;
+ RTE_ATOMIC(uint64_t) pending;
/* Request reset level set, from interrupt or mailbox */
- uint64_t request;
+ RTE_ATOMIC(uint64_t) request;
int attempts; /* Reset failure retry */
int retries; /* Timeout failure retry in reset_post */
/*
@@ -499,7 +499,7 @@ struct hns3_hw {
* by dev_set_link_up() or dev_start().
*/
bool set_link_down;
- unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+ RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
struct hns3_tqp_stats tqp_stats;
/* Include Mac stats | Rx stats | Tx stats */
struct hns3_mac_stats mac_stats;
@@ -844,7 +844,7 @@ struct hns3_vf {
struct hns3_adapter *adapter;
/* Whether PF support push link status change to VF */
- uint16_t pf_push_lsc_cap;
+ RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
/*
* If PF support push link status change, VF still need send request to
@@ -853,7 +853,7 @@ struct hns3_vf {
*/
uint16_t req_link_info_cnt;
- uint16_t poll_job_started; /* whether poll job is started */
+ RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
};
struct hns3_adapter {
@@ -997,32 +997,32 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
hns3_read_reg((a)->io_base, (reg))
static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t res;
- res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+ res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) & (1UL << nr)) != 0;
return res;
}
static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_or_explicit(addr, (1UL << nr), rte_memory_order_relaxed);
}
static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_and_explicit(addr, ~(1UL << nr), rte_memory_order_relaxed);
}
static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t mask = (1UL << nr);
- return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+ return rte_atomic_fetch_and_explicit(addr, ~mask, rte_memory_order_relaxed) & mask;
}
int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a..b83d5b9 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
};
static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
@@ -484,7 +484,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* MTU value issued by hns3 VF PMD must be less than or equal to
* PF's MTU.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "Failed to set mtu during resetting");
return -EIO;
}
@@ -565,7 +565,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
val = hns3_read_dev(hw, HNS3_VF_RST_ING);
hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
}
static void
@@ -650,8 +650,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
struct hns3_vf_to_pf_msg req;
- __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+ rte_memory_order_release);
hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* mailbox from PF driver to get this capability.
*/
hns3vf_handle_mbx_msg(hw);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
break;
remain_ms--;
@@ -677,10 +677,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* state: unknown (means pf not ack), not_supported, supported.
* Here config it as 'not_supported' when it's 'unknown' state.
*/
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
hns3_info(hw, "detect PF support push link status change!");
} else {
@@ -920,7 +920,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
bool send_req;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return;
send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +956,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* sending request to PF kernel driver, then could update link status by
* process PF kernel driver's link status mailbox message.
*/
- if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
return;
if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +994,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_hw *hw = &hns->hw;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw,
"vf set vlan id failed during resetting, vlan_id =%u",
vlan_id);
@@ -1059,7 +1059,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
unsigned int tmp_mask;
int ret = 0;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
mask);
return -EIO;
@@ -1252,7 +1252,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
- __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
hns3vf_service_handler(dev);
}
@@ -1264,7 +1264,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rte_eal_alarm_cancel(hns3vf_service_handler, dev);
- __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
}
static int
@@ -1500,10 +1500,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -1528,7 +1528,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1643,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1773,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -1847,7 +1847,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (ret)
return ret;
}
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return 0;
}
@@ -1888,7 +1888,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* from table space. Hence, for function reset software intervention is
* required to delete the entries.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -2030,7 +2030,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
}
static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
enum hns3_reset_level reset_level;
@@ -2070,10 +2070,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2082,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Hardware reset has been notified, we now have to poll & check if
@@ -2278,7 +2278,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30..26fa2eb 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ enum hns3_hw_err_report_type {
static int
hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
- int num, uint64_t *levels,
+ int num, RTE_ATOMIC(uint64_t) *levels,
enum hns3_hw_err_report_type err_type)
{
const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,7 @@ enum hns3_hw_err_report_type {
hw->reset.request = 0;
hw->reset.pending = 0;
hw->reset.resetting = 0;
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
if (!hw->reset.wait_data) {
@@ -2419,8 +2419,8 @@ enum hns3_hw_err_report_type {
/* Reschedule the reset process after successful initialization */
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+ rte_memory_order_relaxed);
return;
}
@@ -2428,15 +2428,15 @@ enum hns3_hw_err_report_type {
return;
/* Schedule restart alarm if it is not scheduled yet */
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_REQUESTED)
return;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED)
rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}
@@ -2453,11 +2453,11 @@ enum hns3_hw_err_report_type {
return;
}
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
SCHEDULE_NONE)
return;
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}
@@ -2537,7 +2537,7 @@ enum hns3_hw_err_report_type {
}
static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
uint64_t merge_cnt = hw->reset.stats.merge_cnt;
uint64_t tmp;
@@ -2633,7 +2633,7 @@ enum hns3_hw_err_report_type {
* Regardless of whether the execution is successful or not, the
* flow after execution must be continued.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
(void)hns3_cmd_init(hw);
reset_fail:
hw->reset.attempts = 0;
@@ -2661,7 +2661,7 @@ enum hns3_hw_err_report_type {
int ret;
if (hw->reset.stage == RESET_STAGE_NONE) {
- __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_DOWN;
hns3_report_reset_begin(hw);
ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2750,7 @@ enum hns3_hw_err_report_type {
hns3_notify_reset_ready(hw, false);
hns3_clear_reset_level(hw, &hw->reset.pending);
hns3_clear_reset_status(hw);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.attempts = 0;
hw->reset.stats.success_cnt++;
hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2812,7 @@ enum hns3_hw_err_report_type {
hw->reset.mbuf_deferred_free = false;
}
rte_spinlock_unlock(&hw->lock);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_NONE;
hns3_clock_gettime(&tv);
timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c07..1edb07d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
};
int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
void hns3_handle_error(struct hns3_adapter *hns);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc16..10c6e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,7 @@
mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
while (wait_time < mbx_time_limit) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw, "Don't wait for mbx response because of "
"disable_cmd");
return -EBUSY;
@@ -382,7 +382,7 @@
rte_spinlock_lock(&hw->cmq.crq.lock);
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
@@ -457,7 +457,7 @@
}
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f194..ba8f8ec 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,7 @@
int i;
if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
- __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+ rte_atomic_load_explicit(&hw->secondary_cnt, rte_memory_order_relaxed) == 0)
return;
if (!mp_req_type_is_valid(type)) {
@@ -277,7 +277,7 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev)
ret);
return ret;
}
- __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
} else {
ret = hns3_mp_init_primary();
if (ret) {
@@ -297,7 +297,7 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
process_data.eth_dev_cnt--;
if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 7e636a0..73a388b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4464,7 +4464,7 @@
struct hns3_adapter *hns = eth_dev->data->dev_private;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+ rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4530,7 +4530,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4586,7 +4586,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4615,7 +4615,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4648,7 +4648,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d969164..92a6685 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
/* don't goto fail_clear, user may try later */
@@ -1141,7 +1141,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
return -EBUSY;
--
1.8.3.1
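A small aside on the hns3 changes above: once fields such as hw->reset.pending are declared with RTE_ATOMIC(), helpers that receive them by pointer must carry the annotation in their prototypes too, which is why the uint64_t *levels parameters become RTE_ATOMIC(uint64_t) *levels. A minimal sketch of such a helper follows; the function name and bit layout are illustrative only, and the include assumes DPDK's rte_stdatomic.h wrapper header.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Return the most significant pending reset level, or -1 if none are set. */
static int
highest_pending_level(RTE_ATOMIC(uint64_t) *levels)
{
	uint64_t pending = rte_atomic_load_explicit(levels,
						    rte_memory_order_relaxed);

	if (pending == 0)
		return -1;
	return 63 - __builtin_clzll(pending);
}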
* [PATCH v2 07/45] net/bnxt: use rte stdatomic API
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/bnxt/bnxt_cpr.h | 4 ++--
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 ++++++++-----
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 ++--
5 files changed, 14 insertions(+), 11 deletions(-)
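For context, the key pattern in this patch is the descriptor valid-bit check followed by an acquire fence, which keeps later loads of the completion payload from being hoisted above the valid-bit load. A minimal standalone sketch follows; the struct, field and macro names are illustrative rather than the bnxt ones, and the includes assume DPDK's rte_atomic.h and rte_stdatomic.h headers.

#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

struct cmpl_desc {
	uint32_t info;     /* carries the valid bit, written last by hardware */
	uint32_t payload;  /* must only be read after the valid bit is seen */
};

#define CMPL_VALID (1u << 0)

static inline bool
cmpl_is_valid(const volatile struct cmpl_desc *d, bool expected)
{
	bool valid = (d->info & CMPL_VALID) != 0;

	if (valid == expected) {
		/* Order the valid-bit load before any later payload loads. */
		rte_atomic_thread_fence(rte_memory_order_acquire);
		return true;
	}
	return false;
}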
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index c7b3480..43f06fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -107,7 +107,7 @@ struct bnxt_cp_ring_info {
/**
* Check validity of a completion ring entry. If the entry is valid, include a
- * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * C11 rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
* completion are not hoisted by the compiler or by the CPU to come before the
* loading of the "valid" field.
*
@@ -130,7 +130,7 @@ struct bnxt_cp_ring_info {
expected = !(raw_cons & ring_size);
valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
if (valid == expected) {
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
return true;
}
return false;
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 77bc382..36e0ac3 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- uint64_t rx_mbuf_alloc_fail;
+ RTE_ATOMIC(uint64_t) rx_mbuf_alloc_fail;
uint8_t need_realloc;
const struct rte_memzone *mz;
};
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 3542975..ca5d2c6 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -49,7 +49,8 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -86,7 +87,8 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -465,7 +467,8 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -1677,8 +1680,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return -ENOMEM;
}
}
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 775400f..04864e0 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -240,7 +240,7 @@
rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
/* Use acquire fence to order loads of descriptor words. */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Reload lower 64b of descriptors to make it ordered after info3_v. */
rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
((void *)&cpr->cp_desc_ring[cons + 7],
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6a6feab..479f819 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -663,7 +663,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -724,7 +724,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
--
1.8.3.1
* [PATCH v2 08/45] net/cpfl: use rte stdatomic API
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
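The conversion here is mechanical, but the underlying pattern is worth spelling out: each Rx queue keeps a relaxed atomic failure counter that the stats path sums and the reset path clears. A minimal sketch under those assumptions, with hypothetical names:

#include <stdint.h>
#include <rte_stdatomic.h>

struct q_stats {
	RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};

static uint64_t
sum_alloc_failures(struct q_stats *stats, uint16_t nb_queues)
{
	uint64_t total = 0;
	uint16_t i;

	for (i = 0; i < nb_queues; i++)
		total += rte_atomic_load_explicit(&stats[i].mbuf_alloc_failed,
						  rte_memory_order_relaxed);
	return total;
}

static void
clear_alloc_failures(struct q_stats *stats, uint16_t nb_queues)
{
	uint16_t i;

	for (i = 0; i < nb_queues; i++)
		rte_atomic_store_explicit(&stats[i].mbuf_alloc_failed, 0,
					  rte_memory_order_relaxed);
}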
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ef19aa1..5b47e22 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -300,8 +300,9 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed +=
+ rte_atomic_load_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -349,7 +350,8 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
* [PATCH v2 09/45] net/af_xdp: use rte stdatomic API
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
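The refcnt handling in this driver follows the usual shared-object pattern: the last caller of fetch_sub (old value 1) tears the UMEM down. A minimal sketch of that pattern, with illustrative names and a placeholder destructor that stands in for the real unmap/free path:

#include <stdint.h>
#include <stdlib.h>
#include <rte_stdatomic.h>

struct umem {
	RTE_ATOMIC(uint8_t) refcnt;
	void *buffer;
};

static void
umem_destroy(struct umem *u)
{
	free(u->buffer); /* placeholder for unmapping rings and buffers */
	free(u);
}

static void
umem_get(struct umem *u)
{
	rte_atomic_fetch_add_explicit(&u->refcnt, 1, rte_memory_order_acquire);
}

static void
umem_put(struct umem *u)
{
	/* fetch_sub returns the old count; 1 means this was the last user. */
	if (rte_atomic_fetch_sub_explicit(&u->refcnt, 1,
			rte_memory_order_acquire) - 1 == 0)
		umem_destroy(u);
}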
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 268a130..4833180 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -116,7 +116,7 @@ struct xsk_umem_info {
const struct rte_memzone *mz;
struct rte_mempool *mb_pool;
void *buffer;
- uint8_t refcnt;
+ RTE_ATOMIC(uint8_t) refcnt;
uint32_t max_xsks;
};
@@ -995,7 +995,8 @@ static int link_xdp_prog_with_dev(int ifindex, int fd, __u32 flags)
break;
xsk_socket__delete(rxq->xsk);
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1,
+ rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
/* free pkt_tx_queue */
@@ -1097,8 +1098,8 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
ret = -1;
goto out;
}
- if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
- __ATOMIC_ACQUIRE)) {
+ if (rte_atomic_load_explicit(&internals->rx_queues[i].umem->refcnt,
+ rte_memory_order_acquire)) {
*umem = internals->rx_queues[i].umem;
goto out;
}
@@ -1131,11 +1132,11 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
return NULL;
if (umem != NULL &&
- __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
+ rte_atomic_load_explicit(&umem->refcnt, rte_memory_order_acquire) <
umem->max_xsks) {
AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
internals->if_name, rxq->xsk_queue_idx);
- __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&umem->refcnt, 1, rte_memory_order_acquire);
}
}
@@ -1177,7 +1178,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
mb_pool->name, umem->max_xsks);
}
- __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&umem->refcnt, 1, rte_memory_order_release);
}
return umem;
@@ -1606,7 +1607,8 @@ struct msg_internal {
if (rxq->umem == NULL)
return -ENOMEM;
txq->umem = rxq->umem;
- reserve_before = __atomic_load_n(&rxq->umem->refcnt, __ATOMIC_ACQUIRE) <= 1;
+ reserve_before = rte_atomic_load_explicit(&rxq->umem->refcnt,
+ rte_memory_order_acquire) <= 1;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
@@ -1723,7 +1725,7 @@ struct msg_internal {
out_xsk:
xsk_socket__delete(rxq->xsk);
out_umem:
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1, rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
return ret;
--
1.8.3.1
* [PATCH v2 10/45] net/octeon_ep: use rte stdatomic API
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +++--
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +++--
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx2_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx_ep_common.h | 4 ++--
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 ++++--
6 files changed, 20 insertions(+), 16 deletions(-)
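The inst_cnt_ism/pkts_sent_ism locations are plain host memory that the device updates, so the driver only needs a relaxed atomic load to snapshot them and an unsigned delta against the previous snapshot. A minimal sketch of that idea, with illustrative names:

#include <stdint.h>
#include <rte_stdatomic.h>

struct ep_queue {
	RTE_ATOMIC(uint32_t) *cnt_ism; /* written by the device via ISM */
	uint32_t cnt_prev;             /* last value the driver consumed */
	uint32_t pending;              /* accumulated new completions */
};

static void
ep_queue_refresh(struct ep_queue *q)
{
	uint32_t val = rte_atomic_load_explicit(q->cnt_ism,
						rte_memory_order_relaxed);

	q->pending += val - q->cnt_prev; /* unsigned math handles wrap-around */
	q->cnt_prev = val;
}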
diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.h b/drivers/net/octeon_ep/cnxk_ep_rx.h
index ecf95cd..9422042 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.h
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.h
@@ -98,7 +98,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(droq->pkts_sent_ism, rte_memory_order_relaxed);
new_pkts = val - droq->pkts_sent_prev;
droq->pkts_sent_prev = val;
@@ -111,7 +111,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_tx.c b/drivers/net/octeon_ep/cnxk_ep_tx.c
index 233c8aa..e093140 100644
--- a/drivers/net/octeon_ep/cnxk_ep_tx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_tx.c
@@ -15,7 +15,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(iq->inst_cnt_ism, rte_memory_order_relaxed);
iq->inst_cnt += val - iq->inst_cnt_prev;
iq->inst_cnt_prev = val;
@@ -27,7 +27,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 39f357e..39b28de 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -150,10 +150,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
- (void *)iq->inst_cnt_ism, ism_addr);
+ (void *)(uintptr_t)iq->inst_cnt_ism, ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
@@ -235,10 +235,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
- oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ oq_no, (void *)(uintptr_t)droq->pkts_sent_ism, ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 25e0e5a..2aeebb4 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -300,10 +300,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
- (void *)iq->inst_cnt_ism,
+ (void *)(uintptr_t)iq->inst_cnt_ism,
(unsigned int)ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
@@ -386,10 +386,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
- (void *)droq->pkts_sent_ism,
+ (void *)(uintptr_t)droq->pkts_sent_ism,
(unsigned int)ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 7776940..73eb0c9 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -218,7 +218,7 @@ struct otx_ep_iq_config {
*/
struct otx_ep_instr_queue {
/* Location in memory updated by SDP ISM */
- uint32_t *inst_cnt_ism;
+ RTE_ATOMIC(uint32_t) *inst_cnt_ism;
struct rte_mbuf **mbuf_list;
/* Pointer to the Virtual Base addr of the input ring. */
uint8_t *base_addr;
@@ -413,7 +413,7 @@ struct otx_ep_droq {
uint8_t ism_ena;
/* Pointer to host memory copy of output packet count, set by ISM */
- uint32_t *pkts_sent_ism;
+ RTE_ATOMIC(uint32_t) *pkts_sent_ism;
uint32_t pkts_sent_prev;
/* Statistics for this DROQ. */
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 59144e0..eb2d8c1 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -475,7 +475,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
@@ -871,7 +872,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
--
1.8.3.1
* [PATCH v2 11/45] net/octeontx: use rte stdatomic API
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/octeontx/octeontx_ethdev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
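The evdev_refcnt conversion is another last-reference pattern: every port takes a reference at init, and the port that drops the count to zero stops and closes the shared event device. A minimal sketch, with the stop/close step reduced to a boolean result and the names chosen for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) evdev_users;

static void
evdev_ref(void)
{
	rte_atomic_fetch_add_explicit(&evdev_users, 1,
				      rte_memory_order_acquire);
}

static bool
evdev_unref_is_last(void)
{
	/* fetch_sub returns the old count; 1 means we dropped the last ref. */
	return rte_atomic_fetch_sub_explicit(&evdev_users, 1,
					     rte_memory_order_acquire) - 1 == 0;
}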
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index bec54fd..64d1666 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -31,7 +31,7 @@
/* Useful in stopping/closing event device if no of
* eth ports are using it.
*/
-uint16_t evdev_refcnt;
+RTE_ATOMIC(uint16_t) evdev_refcnt;
#define OCTEONTX_QLM_MODE_SGMII 7
#define OCTEONTX_QLM_MODE_XFI 12
@@ -559,7 +559,7 @@ enum octeontx_link_speed {
return 0;
/* Stopping/closing event device once all eth ports are closed. */
- if (__atomic_fetch_sub(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&evdev_refcnt, 1, rte_memory_order_acquire) - 1 == 0) {
rte_event_dev_stop(nic->evdev);
rte_event_dev_close(nic->evdev);
}
@@ -1593,7 +1593,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
nic->pko_vfid = pko_vfid;
nic->port_id = port;
nic->evdev = evdev;
- __atomic_fetch_add(&evdev_refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&evdev_refcnt, 1, rte_memory_order_acquire);
res = octeontx_port_open(nic);
if (res < 0)
@@ -1844,7 +1844,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
}
}
- __atomic_store_n(&evdev_refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&evdev_refcnt, 0, rte_memory_order_release);
/*
* Do 1:1 links for ports & queues. All queues would be mapped to
* one port. If there are more ports than queues, then some ports
--
1.8.3.1
* [PATCH v2 12/45] net/cxgbe: use rte stdatomic API
From: Tyler Retzlaff @ 2024-03-21 19:16 UTC (permalink / raw)
To: dev
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/cxgbe/clip_tbl.c | 12 ++++++------
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 ++++++++++----------
drivers/net/cxgbe/cxgbe_ofld.h | 6 +++---
drivers/net/cxgbe/l2t.c | 12 ++++++------
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +++++++++++----------
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 ++++++------
drivers/net/cxgbe/smt.h | 2 +-
10 files changed, 46 insertions(+), 45 deletions(-)
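All of the cxgbe tables touched here (CLIP, L2T, SMT, MPS TCAM) share one idiom: scan for an entry whose refcnt is zero, then, under the per-entry spinlock, either initialise it with refcnt = 1 or bump the count of an entry already in use. A minimal sketch of that idiom, with illustrative names; relaxed ordering is enough because the spinlock already provides the acquire/release semantics for the entry contents.

#include <stdint.h>
#include <rte_spinlock.h>
#include <rte_stdatomic.h>

struct tbl_entry {
	rte_spinlock_t lock;
	RTE_ATOMIC(uint32_t) refcnt;
	/* key fields and hardware state would live here */
};

static void
tbl_entry_get(struct tbl_entry *e)
{
	rte_spinlock_lock(&e->lock);
	if (rte_atomic_load_explicit(&e->refcnt,
				     rte_memory_order_relaxed) == 0) {
		/* First user: program the hardware entry here. */
		rte_atomic_store_explicit(&e->refcnt, 1,
					  rte_memory_order_relaxed);
	} else {
		rte_atomic_fetch_add_explicit(&e->refcnt, 1,
					      rte_memory_order_relaxed);
	}
	rte_spinlock_unlock(&e->lock);
}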
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
index b709e26..8588b88 100644
--- a/drivers/net/cxgbe/clip_tbl.c
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -55,7 +55,7 @@ void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
int ret;
t4_os_lock(&ce->lock);
- if (__atomic_fetch_sub(&ce->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ce->refcnt, 1, rte_memory_order_relaxed) - 1 == 0) {
ret = clip6_release_mbox(dev, ce->addr);
if (ret)
dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
@@ -79,7 +79,7 @@ static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
unsigned int clipt_size = c->clipt_size;
for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -114,12 +114,12 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce = find_or_alloc_clipe(ctbl, lip);
if (ce) {
t4_os_lock(&ce->lock);
- if (__atomic_load_n(&ce->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&ce->refcnt, rte_memory_order_relaxed) == 0) {
rte_memcpy(ce->addr, lip, sizeof(ce->addr));
if (v6) {
ce->type = FILTER_TYPE_IPV6;
- __atomic_store_n(&ce->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ce->refcnt, 1,
+ rte_memory_order_relaxed);
ret = clip6_get_mbox(dev, lip);
if (ret)
dev_debug(adap,
@@ -129,7 +129,7 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce->type = FILTER_TYPE_IPV4;
}
} else {
- __atomic_fetch_add(&ce->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ce->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&ce->lock);
}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
index 3b2be66..439fcf6 100644
--- a/drivers/net/cxgbe/clip_tbl.h
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -13,7 +13,7 @@ struct clip_entry {
enum filter_type type; /* entry type */
u32 addr[4]; /* IPV4 or IPV6 address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct clip_tbl {
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index c479454..2ed21f2 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -418,15 +418,15 @@ void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- __atomic_fetch_sub(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
}
@@ -448,15 +448,15 @@ void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
t->tid_tab[tid] = data;
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
- __atomic_fetch_add(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index 33697c7..48a5ec0 100644
--- a/drivers/net/cxgbe/cxgbe_ofld.h
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -60,10 +60,10 @@ struct tid_info {
unsigned int atids_in_use;
/* TIDs in the TCAM */
- u32 tids_in_use;
+ RTE_ATOMIC(u32) tids_in_use;
/* TIDs in the HASH */
- u32 hash_tids_in_use;
- u32 conns_in_use;
+ RTE_ATOMIC(u32) hash_tids_in_use;
+ RTE_ATOMIC(u32) conns_in_use;
rte_spinlock_t atid_lock __rte_cache_aligned;
rte_spinlock_t ftid_lock;
diff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c
index 21f4019..ecb5fec 100644
--- a/drivers/net/cxgbe/l2t.c
+++ b/drivers/net/cxgbe/l2t.c
@@ -14,8 +14,8 @@
*/
void cxgbe_l2t_release(struct l2t_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -112,7 +112,7 @@ static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
struct l2t_entry *first_free = NULL;
for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -151,18 +151,18 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)
dev_debug(adap, "Failed to write L2T entry: %d",
ret);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
diff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h
index e4c0ebe..67d0197 100644
--- a/drivers/net/cxgbe/l2t.h
+++ b/drivers/net/cxgbe/l2t.h
@@ -30,7 +30,7 @@ struct l2t_entry {
u8 lport; /* destination port */
u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct l2t_data {
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
index 8e0da9c..79a7daa 100644
--- a/drivers/net/cxgbe/mps_tcam.c
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -76,7 +76,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- __atomic_fetch_add(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
@@ -98,7 +98,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
entry = &mpstcam->entry[ret];
memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
if (cxgbe_update_free_idx(mpstcam))
@@ -147,7 +147,7 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
* provided value is -1
*/
if (entry->state == MPS_ENTRY_UNUSED) {
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
}
@@ -165,7 +165,7 @@ static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_UNUSED;
}
@@ -190,12 +190,13 @@ int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
return -EINVAL;
}
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
entry->mask, idx, 1, pi->port_id,
false);
else
- ret = __atomic_fetch_sub(&entry->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&entry->refcnt, 1,
+ rte_memory_order_relaxed) - 1;
if (ret == 0) {
reset_mpstcam_entry(entry);
@@ -222,7 +223,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
goto out_unlock;
ret = t4_alloc_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -231,7 +232,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
@@ -253,7 +254,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) != 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) != 1)
goto out_unlock;
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -262,7 +263,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
diff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h
index 363786b..4b421f7 100644
--- a/drivers/net/cxgbe/mps_tcam.h
+++ b/drivers/net/cxgbe/mps_tcam.h
@@ -29,7 +29,7 @@ struct mps_tcam_entry {
u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
};
struct mpstcam_table {
diff --git a/drivers/net/cxgbe/smt.c b/drivers/net/cxgbe/smt.c
index 4e14a73..2f961c1 100644
--- a/drivers/net/cxgbe/smt.c
+++ b/drivers/net/cxgbe/smt.c
@@ -119,7 +119,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end, *first_free = NULL;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -156,7 +156,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_smte(s, smac);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->pfvf = pfvf;
rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
ret = write_smt_entry(dev, e);
@@ -168,9 +168,9 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
goto out_write_unlock;
}
e->state = SMT_STATE_SWITCHING;
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
@@ -195,8 +195,8 @@ struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
void cxgbe_smt_release(struct smt_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/smt.h b/drivers/net/cxgbe/smt.h
index 531810e..8b378ae 100644
--- a/drivers/net/cxgbe/smt.h
+++ b/drivers/net/cxgbe/smt.h
@@ -23,7 +23,7 @@ struct smt_entry {
u16 pfvf;
u16 hw_idx;
u8 src_mac[RTE_ETHER_ADDR_LEN];
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
rte_spinlock_t lock;
};
--
1.8.3.1
* [PATCH v2 13/45] net/gve: use rte stdatomic API
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/gve/base/gve_osdep.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
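The single change in this patch is a static counter used to generate unique memzone names; the relaxed fetch_add only has to guarantee that two concurrent allocations never pick the same id. A minimal sketch of the same idea follows; the name prefix and the plain rte_memzone_reserve() call are illustrative, not the gve code.

#include <stdint.h>
#include <stdio.h>
#include <rte_memzone.h>
#include <rte_stdatomic.h>

static const struct rte_memzone *
alloc_named_zone(size_t size)
{
	static RTE_ATOMIC(uint16_t) zone_id;
	char name[RTE_MEMZONE_NAMESIZE];

	/* Each caller gets a distinct id, even when racing on other cores. */
	snprintf(name, sizeof(name), "example_dma_%u",
		 (unsigned int)rte_atomic_fetch_add_explicit(&zone_id, 1,
						rte_memory_order_relaxed));
	return rte_memzone_reserve(name, size, SOCKET_ID_ANY, 0);
}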
diff --git a/drivers/net/gve/base/gve_osdep.h b/drivers/net/gve/base/gve_osdep.h
index a3702f4..c0ee0d5 100644
--- a/drivers/net/gve/base/gve_osdep.h
+++ b/drivers/net/gve/base/gve_osdep.h
@@ -135,7 +135,7 @@ struct gve_dma_mem {
static inline void *
gve_alloc_dma_mem(struct gve_dma_mem *mem, u64 size)
{
- static uint16_t gve_dma_memzone_id;
+ static RTE_ATOMIC(uint16_t) gve_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -143,7 +143,7 @@ struct gve_dma_mem {
return NULL;
snprintf(z_name, sizeof(z_name), "gve_dma_%u",
- __atomic_fetch_add(&gve_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&gve_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG,
PAGE_SIZE);
--
1.8.3.1
* [PATCH v2 14/45] net/memif: use rte stdatomic API
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/memif/memif.h | 4 ++--
drivers/net/memif/rte_eth_memif.c | 50 +++++++++++++++++++--------------------
2 files changed, 27 insertions(+), 27 deletions(-)
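The memif ring is the one place in this series where ordering really matters: head and tail are free-running uint16_t indices, the writer of an index publishes it with store-release after filling the descriptors, and the peer reads it with load-acquire, while each side may read its own index relaxed. A minimal sketch of the producer half under those assumptions, with illustrative names:

#include <stdint.h>
#include <rte_stdatomic.h>

struct ring {
	RTE_ATOMIC(uint16_t) head; /* written by the producer */
	RTE_ATOMIC(uint16_t) tail; /* written by the consumer */
	uint16_t size;             /* number of slots, power of two */
};

static uint16_t
producer_free_slots(struct ring *r)
{
	/* Own index: relaxed load; peer index: acquire to see freed slots. */
	uint16_t head = rte_atomic_load_explicit(&r->head,
						 rte_memory_order_relaxed);
	uint16_t tail = rte_atomic_load_explicit(&r->tail,
						 rte_memory_order_acquire);

	return r->size - (uint16_t)(head - tail);
}

static void
producer_publish(struct ring *r, uint16_t new_head)
{
	/* Release: descriptor writes become visible before the new index. */
	rte_atomic_store_explicit(&r->head, new_head,
				  rte_memory_order_release);
}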
diff --git a/drivers/net/memif/memif.h b/drivers/net/memif/memif.h
index cb72c69..ccaa218 100644
--- a/drivers/net/memif/memif.h
+++ b/drivers/net/memif/memif.h
@@ -169,9 +169,9 @@ typedef struct __rte_packed __rte_aligned(128)
uint32_t cookie; /**< MEMIF_COOKIE */
uint16_t flags; /**< flags */
#define MEMIF_RING_FLAG_MASK_INT 1 /**< disable interrupt mode */
- uint16_t head; /**< pointer to ring buffer head */
+ RTE_ATOMIC(uint16_t) head; /**< pointer to ring buffer head */
MEMIF_CACHELINE_ALIGN_MARK(cacheline1);
- uint16_t tail; /**< pointer to ring buffer tail */
+ RTE_ATOMIC(uint16_t) tail; /**< pointer to ring buffer tail */
MEMIF_CACHELINE_ALIGN_MARK(cacheline2);
memif_desc_t desc[0]; /**< buffer descriptors */
} memif_ring_t;
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 18377d9..16da22b 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -262,7 +262,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* in function eth_memif_rx for C2S queues.
*/
- cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ cur_tail = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
while (mq->last_tail != cur_tail) {
RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);
@@ -334,10 +334,10 @@ struct mp_region_msg {
if (type == MEMIF_RING_C2S) {
cur_slot = mq->last_head;
- last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire);
} else {
cur_slot = mq->last_tail;
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
}
if (cur_slot == last_slot)
@@ -473,7 +473,7 @@ struct mp_region_msg {
no_free_bufs:
if (type == MEMIF_RING_C2S) {
- __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, cur_slot, rte_memory_order_release);
mq->last_head = cur_slot;
} else {
mq->last_tail = cur_slot;
@@ -485,7 +485,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
while (n_slots--) {
@@ -493,7 +493,7 @@ struct mp_region_msg {
d0 = &ring->desc[s0];
d0->length = pmd->run.pkt_buffer_size;
}
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
}
mq->n_pkts += n_rx_pkts;
@@ -541,7 +541,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* to synchronize it between threads.
*/
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
if (cur_slot == last_slot)
goto refill;
n_slots = last_slot - cur_slot;
@@ -591,7 +591,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
if (n_slots < 32)
@@ -620,7 +620,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_tx.
*/
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
mq->n_pkts += n_rx_pkts;
@@ -668,9 +668,9 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot +
- __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
} else {
/* For S2C queues ring->tail is updated by the sender and
* this function is called in the context of sending thread.
@@ -678,8 +678,8 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
- n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
+ slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_relaxed);
+ n_free = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire) - slot;
}
uint16_t i;
@@ -792,9 +792,9 @@ struct mp_region_msg {
no_free_slots:
if (type == MEMIF_RING_C2S)
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
else
- __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, slot, rte_memory_order_release);
if (((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) &&
(rte_intr_fd_get(mq->intr_handle) >= 0)) {
@@ -882,7 +882,7 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot + mq->last_tail;
int used_slots;
@@ -942,7 +942,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_rx for C2S rings.
*/
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
/* Send interrupt, if enabled. */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
@@ -1155,8 +1155,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_c2s_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1175,8 +1175,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_s2c_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1314,8 +1314,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
@@ -1330,8 +1330,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
--
1.8.3.1
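A note on the pattern above: only the fields shared between the producer and
consumer sides (head and tail) gain the RTE_ATOMIC() specifier, and every
store-release is paired with a load-acquire on the other side, exactly as the
in-tree comments describe. A minimal standalone sketch of that pairing, using
hypothetical names rather than the memif structures and assuming only
<rte_stdatomic.h>:

#include <rte_stdatomic.h>

/* Hypothetical ring indices; only fields accessed from both sides
 * carry the RTE_ATOMIC() specifier. */
struct demo_ring {
        RTE_ATOMIC(uint16_t) head;      /* advanced by the producer */
        RTE_ATOMIC(uint16_t) tail;      /* advanced by the consumer */
};

/* Producer: the release store publishes descriptor writes made before it. */
static void
demo_publish(struct demo_ring *r, uint16_t new_head)
{
        rte_atomic_store_explicit(&r->head, new_head, rte_memory_order_release);
}

/* Consumer: the acquire load pairs with the release store above. */
static uint16_t
demo_poll(struct demo_ring *r)
{
        return rte_atomic_load_explicit(&r->head, rte_memory_order_acquire);
}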
* [PATCH v2 15/45] net/thunderx: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (13 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 14/45] net/memif: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 16/45] net/virtio: " Tyler Retzlaff
` (29 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/thunderx/nicvf_rxtx.c | 9 +++++----
drivers/net/thunderx/nicvf_struct.h | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index defa551..2cb6a99 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -374,8 +374,8 @@
NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
(nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));
- next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
- __ATOMIC_ACQUIRE);
+ next_tail = rte_atomic_fetch_add_explicit(&rbdr->next_tail, to_fill,
+ rte_memory_order_acquire);
ltail = next_tail;
for (i = 0; i < to_fill; i++) {
struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);
@@ -385,9 +385,10 @@
ltail++;
}
- rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&rbdr->tail, next_tail,
+ rte_memory_order_relaxed);
- __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&rbdr->tail, ltail, rte_memory_order_release);
nicvf_addr_write(door, to_fill);
return to_fill;
}
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 13cf8fe..6507898 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -20,8 +20,8 @@ struct nicvf_rbdr {
struct rbdr_entry_t *desc;
nicvf_iova_addr_t phys;
uint32_t buffsz;
- uint32_t tail;
- uint32_t next_tail;
+ RTE_ATOMIC(uint32_t) tail;
+ RTE_ATOMIC(uint32_t) next_tail;
uint32_t head;
uint32_t qlen_mask;
} __rte_cache_aligned;
--
1.8.3.1
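The cast added around rbdr->tail reflects that rte_wait_until_equal_32() still
takes a plain uint32_t pointer while the field is now RTE_ATOMIC(uint32_t). A
minimal sketch of the same idea with a hypothetical index, assuming
<rte_pause.h> and <rte_stdatomic.h>:

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) demo_tail;  /* hypothetical hardware tail index */

static void
demo_wait_then_advance(uint32_t expected, uint32_t next)
{
        /* Strip the atomic qualifier through uintptr_t for the helper,
         * mirroring the conversion in the patch above. */
        rte_wait_until_equal_32((uint32_t *)(uintptr_t)&demo_tail, expected,
                rte_memory_order_relaxed);
        rte_atomic_store_explicit(&demo_tail, next, rte_memory_order_release);
}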
* [PATCH v2 16/45] net/virtio: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (14 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 15/45] net/thunderx: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 17/45] net/hinic: " Tyler Retzlaff
` (28 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/virtio/virtio_ring.h | 4 +--
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 ++++-----
drivers/net/virtio/virtqueue.h | 32 ++++++++++++------------
3 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b..2a25751 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -59,7 +59,7 @@ struct vring_used_elem {
struct vring_used {
uint16_t flags;
- uint16_t idx;
+ RTE_ATOMIC(uint16_t) idx;
struct vring_used_elem ring[];
};
@@ -70,7 +70,7 @@ struct vring_packed_desc {
uint64_t addr;
uint32_t len;
uint16_t id;
- uint16_t flags;
+ RTE_ATOMIC(uint16_t) flags;
};
#define RING_EVENT_FLAGS_ENABLE 0x0
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4fdfe70..24e2b2c 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -948,7 +948,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+ uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -1037,8 +1037,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vring->desc[vq->used_idx].flags, flags,
+ rte_memory_order_release);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {
@@ -1057,9 +1057,9 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
struct vring *vring = &dev->vrings.split[queue_idx];
/* Consume avail ring, using used ring idx as first one */
- while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ while (rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
!= vring->avail->idx) {
- avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ avail_idx = rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
& (vring->num - 1);
desc_idx = vring->avail->ring[avail_idx];
@@ -1070,7 +1070,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
uep->id = desc_idx;
uep->len = n_descs;
- __atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&vring->used->idx, 1, rte_memory_order_relaxed);
}
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 5d0c039..b7bbdde 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -37,7 +37,7 @@
virtio_mb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
else
rte_mb();
}
@@ -46,7 +46,7 @@
virtio_rmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
else
rte_io_rmb();
}
@@ -55,7 +55,7 @@
virtio_wmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
else
rte_io_wmb();
}
@@ -67,12 +67,12 @@
uint16_t flags;
if (weak_barriers) {
-/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports
+/* x86 prefers to using rte_io_rmb over rte_atomic_load_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+ flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
#else
flags = dp->flags;
rte_io_rmb();
@@ -90,12 +90,12 @@
uint16_t flags, uint8_t weak_barriers)
{
if (weak_barriers) {
-/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports
+/* x86 prefers to using rte_io_wmb over rte_atomic_store_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
#else
rte_io_wmb();
dp->flags = flags;
@@ -425,7 +425,7 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
if (vq->hw->weak_barriers) {
/**
- * x86 prefers to using rte_smp_rmb over __atomic_load_n as it
+ * x86 prefers to using rte_smp_rmb over rte_atomic_load_explicit as it
* reports a slightly better perf, which comes from the saved
* branch by the compiler.
* The if and else branches are identical with the smp and io
@@ -435,8 +435,8 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
idx = vq->vq_split.ring.used->idx;
rte_smp_rmb();
#else
- idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
- __ATOMIC_ACQUIRE);
+ idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,
+ rte_memory_order_acquire);
#endif
} else {
idx = vq->vq_split.ring.used->idx;
@@ -454,7 +454,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
vq_update_avail_idx(struct virtqueue *vq)
{
if (vq->hw->weak_barriers) {
- /* x86 prefers to using rte_smp_wmb over __atomic_store_n as
+ /* x86 prefers to using rte_smp_wmb over rte_atomic_store_explicit as
* it reports a slightly better perf, which comes from the
* saved branch by the compiler.
* The if and else branches are identical with the smp and
@@ -464,8 +464,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
rte_smp_wmb();
vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
#else
- __atomic_store_n(&vq->vq_split.ring.avail->idx,
- vq->vq_avail_idx, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,
+ vq->vq_avail_idx, rte_memory_order_release);
#endif
} else {
rte_io_wmb();
@@ -528,8 +528,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
- used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
- __ATOMIC_RELAXED); \
+ used_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \
+ rte_memory_order_relaxed); \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
if (virtio_with_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
@@ -546,7 +546,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
" avail.flags=0x%x; used.flags=0x%x", \
(vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
(vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
- __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+ rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \
(vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
--
1.8.3.1
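The virtqueue barrier helpers keep their shape; only the fence argument moves
from the __ATOMIC_* enum to rte_memory_order_*. A condensed sketch of that
mapping with a hypothetical helper name, assuming <rte_atomic.h> and
<rte_stdatomic.h>:

#include <rte_atomic.h>
#include <rte_stdatomic.h>

/* Hypothetical helper following the virtio pattern: a C11-style fence for
 * weakly ordered guests, a full hardware barrier otherwise. */
static inline void
demo_virtio_mb(uint8_t weak_barriers)
{
        if (weak_barriers)
                rte_atomic_thread_fence(rte_memory_order_seq_cst);
        else
                rte_mb();
}

The acquire and release variants map the same way, pairing
rte_memory_order_acquire with rte_io_rmb() and rte_memory_order_release with
rte_io_wmb().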
* [PATCH v2 17/45] net/hinic: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (15 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 16/45] net/virtio: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 18/45] net/idpf: " Tyler Retzlaff
` (27 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 7adb6e3..c2cd295 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -1004,7 +1004,7 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
while (pkts < nb_pkts) {
/* 2. current ci is done */
rx_cqe = &rxq->rx_cqe[sw_ci];
- status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
+ status = rte_atomic_load_explicit(&rx_cqe->status, rte_memory_order_acquire);
if (!HINIC_GET_RX_DONE_BE(status))
break;
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 5c30339..d77ef51 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -29,7 +29,7 @@ struct hinic_rq_ctrl {
};
struct hinic_rq_cqe {
- u32 status;
+ RTE_ATOMIC(u32) status;
u32 vlan_len;
u32 offload_type;
u32 rss_hash;
--
1.8.3.1
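Marking the CQE status with RTE_ATOMIC() and loading it with acquire ordering
keeps the other completion fields from being read before the done indication
has been observed. A minimal sketch with a hypothetical descriptor layout and
done bit (not the hinic definitions), assuming <rte_stdatomic.h>:

#include <rte_stdatomic.h>

struct demo_cqe {
        RTE_ATOMIC(uint32_t) status;    /* written last by the device */
        uint32_t pkt_len;               /* valid only once status is done */
};

#define DEMO_CQE_DONE 0x1               /* hypothetical done bit */

static int
demo_cqe_ready(struct demo_cqe *cqe, uint32_t *pkt_len)
{
        /* Acquire: the pkt_len read below cannot be hoisted above this load. */
        if (!(rte_atomic_load_explicit(&cqe->status,
                        rte_memory_order_acquire) & DEMO_CQE_DONE))
                return 0;
        *pkt_len = cqe->pkt_len;
        return 1;
}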
* [PATCH v2 18/45] net/idpf: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (16 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 17/45] net/hinic: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 19/45] net/qede: " Tyler Retzlaff
` (26 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/idpf/idpf_ethdev.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 86151c9..1df4d6b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -259,8 +259,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed += rte_atomic_load_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -308,7 +308,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&rxq->rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
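The statistics path only needs relaxed ordering: the counter is a plain tally
and nothing else is published through it. A sketch of the aggregation step
with a hypothetical queue type, assuming <rte_stdatomic.h>:

#include <rte_stdatomic.h>

struct demo_rxq {
        RTE_ATOMIC(uint64_t) mbuf_alloc_failed; /* hypothetical per-queue stat */
};

static uint64_t
demo_sum_alloc_failures(struct demo_rxq *queues, unsigned int nb_queues)
{
        uint64_t total = 0;
        unsigned int i;

        for (i = 0; i < nb_queues; i++)
                total += rte_atomic_load_explicit(&queues[i].mbuf_alloc_failed,
                        rte_memory_order_relaxed);
        return total;
}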
* [PATCH v2 19/45] net/qede: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (17 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 18/45] net/idpf: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 20/45] net/ring: " Tyler Retzlaff
` (25 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/qede/base/bcm_osal.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2edeb38..abd1186 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -51,11 +51,11 @@ void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
/* Counter to track current memzone allocated */
static uint16_t ecore_mz_count;
-static uint32_t ref_cnt;
+static RTE_ATOMIC(uint32_t) ref_cnt;
int ecore_mz_mapping_alloc(void)
{
- if (__atomic_fetch_add(&ref_cnt, 1, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_fetch_add_explicit(&ref_cnt, 1, rte_memory_order_relaxed) == 0) {
ecore_mz_mapping = rte_calloc("ecore_mz_map",
rte_memzone_max_get(), sizeof(struct rte_memzone *), 0);
}
@@ -68,7 +68,7 @@ int ecore_mz_mapping_alloc(void)
void ecore_mz_mapping_free(void)
{
- if (__atomic_fetch_sub(&ref_cnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ref_cnt, 1, rte_memory_order_relaxed) - 1 == 0) {
rte_free(ecore_mz_mapping);
ecore_mz_mapping = NULL;
}
--
1.8.3.1
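The refcount conversion relies on the fetch-and-add/sub primitives returning
the previous value: 0 from the add marks the first user (allocate), and
result - 1 == 0 from the sub marks the last user (free). A minimal sketch with
hypothetical names; the relaxed ordering mirrors the patch and assumes
allocation and teardown are otherwise serialized:

#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) demo_refcnt;

static void demo_resource_alloc(void) { /* allocate the shared resource */ }
static void demo_resource_free(void)  { /* release the shared resource */ }

static void
demo_get(void)
{
        /* Previous value 0: this caller is the first user. */
        if (rte_atomic_fetch_add_explicit(&demo_refcnt, 1,
                        rte_memory_order_relaxed) == 0)
                demo_resource_alloc();
}

static void
demo_put(void)
{
        /* Previous value 1 (result - 1 == 0): this caller was the last user. */
        if (rte_atomic_fetch_sub_explicit(&demo_refcnt, 1,
                        rte_memory_order_relaxed) - 1 == 0)
                demo_resource_free();
}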
* [PATCH v2 20/45] net/ring: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (18 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 19/45] net/qede: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 21/45] vdpa/mlx5: " Tyler Retzlaff
` (24 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/ring/rte_eth_ring.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 48953dd..b16f5d5 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -44,8 +44,8 @@ enum dev_action {
struct ring_queue {
struct rte_ring *rng;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_internals {
@@ -82,7 +82,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts += nb_rx;
else
- __atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->rx_pkts, nb_rx, rte_memory_order_relaxed);
return nb_rx;
}
@@ -96,7 +96,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SP_ENQ)
r->tx_pkts += nb_tx;
else
- __atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->tx_pkts, nb_tx, rte_memory_order_relaxed);
return nb_tx;
}
--
1.8.3.1
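The counters stay non-atomic on the uncontended path: when the ring was
created single-producer/single-consumer only one thread updates the statistic,
so a plain add suffices; otherwise the relaxed atomic add avoids lost updates.
A sketch of that split with a hypothetical helper, assuming <rte_ring.h> and
<rte_stdatomic.h>:

#include <rte_ring.h>
#include <rte_stdatomic.h>

static void
demo_count_rx(const struct rte_ring *rng, RTE_ATOMIC(uint64_t) *rx_pkts,
        uint16_t nb_rx)
{
        if (rng->flags & RING_F_SC_DEQ)
                *rx_pkts += nb_rx;      /* single consumer: no contention */
        else
                rte_atomic_fetch_add_explicit(rx_pkts, nb_rx,
                        rte_memory_order_relaxed);
}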
* [PATCH v2 21/45] vdpa/mlx5: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (19 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 20/45] net/ring: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 22/45] raw/ifpga: " Tyler Retzlaff
` (23 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +++++++++---------
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +++++------
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 ++-
6 files changed, 52 insertions(+), 44 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@
uint32_t timeout = 0;
/* Check and wait all close tasks done. */
- while (__atomic_load_n(&priv->dev_close_progress,
- __ATOMIC_RELAXED) != 0 && timeout < 1000) {
+ while (rte_atomic_load_explicit(&priv->dev_close_progress,
+ rte_memory_order_relaxed) != 0 && timeout < 1000) {
rte_delay_us_sleep(10000);
timeout++;
}
@@ -294,8 +294,8 @@
priv->last_c_thrd_idx = 0;
else
priv->last_c_thrd_idx++;
- __atomic_store_n(&priv->dev_close_progress,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress,
+ 1, rte_memory_order_relaxed);
if (mlx5_vdpa_task_add(priv,
priv->last_c_thrd_idx,
MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+ rte_memory_order_relaxed);
priv->state = MLX5_VDPA_STATE_PROBED;
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
@@ -664,7 +664,9 @@
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t max_queues, index, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
@@ -847,8 +849,8 @@
if (conf_thread_mng.initializer_priv == priv)
if (mlx5_vdpa_mult_threads_create())
goto error;
- __atomic_fetch_add(&conf_thread_mng.refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+ rte_memory_order_relaxed);
}
if (mlx5_vdpa_create_dev_resources(priv))
goto error;
@@ -937,8 +939,8 @@
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
if (priv->use_c_thread)
- if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
- 1, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+ 1, rte_memory_order_relaxed) == 1)
mlx5_vdpa_mult_threads_destroy(true);
rte_free(priv);
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 7b37c98..0cc67ed 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
struct mlx5_vdpa_task {
struct mlx5_vdpa_priv *priv;
enum mlx5_vdpa_task_type type;
- uint32_t *remaining_cnt;
- uint32_t *err_cnt;
+ RTE_ATOMIC(uint32_t) *remaining_cnt;
+ RTE_ATOMIC(uint32_t) *err_cnt;
uint32_t idx;
} __rte_packed __rte_aligned(4);
@@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
struct mlx5_vdpa_conf_thread_mng {
void *initializer_priv;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
uint32_t max_thrds;
pthread_mutex_t cthrd_lock;
struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
@@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
uint16_t last_c_thrd_idx;
- uint16_t dev_close_progress;
+ RTE_ATOMIC(uint16_t) dev_close_progress;
uint16_t num_mrs; /* Number of memory regions. */
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
@@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num);
int
mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
void
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num)
{
struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@
return -1;
for (i = 0 ; i < num; i++)
if (task[i].remaining_cnt)
- __atomic_fetch_add(task[i].remaining_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+ rte_memory_order_relaxed);
/* wake up conf thread. */
pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@
}
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
{
/* Check and wait all tasks done. */
- while (__atomic_load_n(remaining_cnt,
- __ATOMIC_RELAXED) != 0) {
+ while (rte_atomic_load_explicit(remaining_cnt,
+ rte_memory_order_relaxed) != 0) {
rte_delay_us_sleep(sleep_time);
}
- if (__atomic_load_n(err_cnt,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(err_cnt,
+ rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Tasks done with error.");
return true;
}
@@ -137,8 +137,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to register mr %d.", task.idx);
- __atomic_fetch_add(task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+ rte_memory_order_relaxed);
}
break;
case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to setup virtq %d.", task.idx);
- __atomic_fetch_add(
- task.err_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ task.err_cnt, 1, rte_memory_order_relaxed);
}
virtq->enable = 1;
pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@
DRV_LOG(ERR,
"Failed to stop virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -176,9 +176,9 @@
DRV_LOG(ERR,
"Failed to get negotiated features virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -200,9 +200,9 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(
+ rte_atomic_store_explicit(
&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
break;
case MLX5_VDPA_TASK_PREPARE_VIRTQ:
ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@
DRV_LOG(ERR,
"Failed to prepare virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
break;
default:
@@ -222,8 +222,8 @@
break;
}
if (task.remaining_cnt)
- __atomic_fetch_sub(task.remaining_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+ 1, rte_memory_order_relaxed);
}
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@
uint8_t mode = 0;
int ret = -rte_errno;
uint32_t i, thrd_idx, data[1];
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
(priv->vid, &mode, &priv->vmem_info.size,
&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@
{
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
struct rte_vhost_vring vq;
--
1.8.3.1
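Because the completion counters become RTE_ATOMIC(uint32_t), the qualifier has
to follow them through every prototype and local declaration, which is why the
header and the on-stack remaining_cnt/err_cnt variables change together. A
condensed sketch of the wait side with hypothetical names, assuming
<rte_cycles.h> for the sleep helper:

#include <stdbool.h>
#include <rte_cycles.h>
#include <rte_stdatomic.h>

/* Workers decrement *remaining as tasks finish and bump *err on failure;
 * the issuing thread sleeps until everything has drained. */
static bool
demo_wait_tasks_done(RTE_ATOMIC(uint32_t) *remaining, RTE_ATOMIC(uint32_t) *err)
{
        while (rte_atomic_load_explicit(remaining,
                        rte_memory_order_relaxed) != 0)
                rte_delay_us_sleep(1000);
        return rte_atomic_load_explicit(err, rte_memory_order_relaxed) != 0;
}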
* [PATCH v2 22/45] raw/ifpga: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (20 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 21/45] vdpa/mlx5: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 23/45] event/opdl: " Tyler Retzlaff
` (22 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/raw/ifpga/ifpga_rawdev.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c
index f89bd3f..78d3c88 100644
--- a/drivers/raw/ifpga/ifpga_rawdev.c
+++ b/drivers/raw/ifpga/ifpga_rawdev.c
@@ -73,7 +73,7 @@
static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
-static int ifpga_monitor_refcnt;
+static RTE_ATOMIC(int) ifpga_monitor_refcnt;
static rte_thread_t ifpga_monitor_start_thread;
static struct ifpga_rawdev *
@@ -512,7 +512,7 @@ static int set_surprise_link_check_aer(
int gsd_enable, ret;
#define MS 1000
- while (__atomic_load_n(&ifpga_monitor_refcnt, __ATOMIC_RELAXED)) {
+ while (rte_atomic_load_explicit(&ifpga_monitor_refcnt, rte_memory_order_relaxed)) {
gsd_enable = 0;
for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
ifpga_rdev = &ifpga_rawdevices[i];
@@ -549,7 +549,7 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 1;
- if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_fetch_add_explicit(&ifpga_monitor_refcnt, 1, rte_memory_order_relaxed)) {
ret = rte_thread_create_internal_control(&ifpga_monitor_start_thread,
"ifpga-mon", ifpga_rawdev_gsd_handle, NULL);
if (ret != 0) {
@@ -573,7 +573,8 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 0;
- if (!(__atomic_fetch_sub(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED) - 1) &&
+ if (!(rte_atomic_fetch_sub_explicit(&ifpga_monitor_refcnt, 1,
+ rte_memory_order_relaxed) - 1) &&
ifpga_monitor_start_thread.opaque_id != 0) {
ret = pthread_cancel((pthread_t)ifpga_monitor_start_thread.opaque_id);
if (ret)
--
1.8.3.1
* [PATCH v2 23/45] event/opdl: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (21 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 22/45] raw/ifpga: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 24/45] event/octeontx: " Tyler Retzlaff
` (21 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/opdl/opdl_ring.c | 80 +++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index da5ea02..a86bfb8 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -47,12 +47,12 @@ struct shared_state {
/* Last known minimum sequence number of dependencies, used for multi
* thread operation
*/
- uint32_t available_seq;
+ RTE_ATOMIC(uint32_t) available_seq;
char _pad1[RTE_CACHE_LINE_SIZE * 3];
- uint32_t head; /* Head sequence number (for multi thread operation) */
+ RTE_ATOMIC(uint32_t) head; /* Head sequence number (for multi thread operation) */
char _pad2[RTE_CACHE_LINE_SIZE * 3];
struct opdl_stage *stage; /* back pointer */
- uint32_t tail; /* Tail sequence number */
+ RTE_ATOMIC(uint32_t) tail; /* Tail sequence number */
char _pad3[RTE_CACHE_LINE_SIZE * 2];
} __rte_cache_aligned;
@@ -150,10 +150,10 @@ struct opdl_ring {
available(const struct opdl_stage *s)
{
if (s->threadsafe == true) {
- uint32_t n = __atomic_load_n(&s->shared.available_seq,
- __ATOMIC_ACQUIRE) -
- __atomic_load_n(&s->shared.head,
- __ATOMIC_ACQUIRE);
+ uint32_t n = rte_atomic_load_explicit(&s->shared.available_seq,
+ rte_memory_order_acquire) -
+ rte_atomic_load_explicit(&s->shared.head,
+ rte_memory_order_acquire);
/* Return 0 if available_seq needs to be updated */
return (n <= s->num_slots) ? n : 0;
@@ -169,7 +169,7 @@ struct opdl_ring {
{
uint32_t i;
uint32_t this_tail = s->shared.tail;
- uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+ uint32_t min_seq = rte_atomic_load_explicit(&s->deps[0]->tail, rte_memory_order_acquire);
/* Input stage sequence numbers are greater than the sequence numbers of
* its dependencies so an offset of t->num_slots is needed when
* calculating available slots and also the condition which is used to
@@ -180,16 +180,16 @@ struct opdl_ring {
if (is_input_stage(s)) {
wrap = s->num_slots;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((this_tail - seq) > (this_tail - min_seq))
min_seq = seq;
}
} else {
wrap = 0;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((seq - this_tail) < (min_seq - this_tail))
min_seq = seq;
}
@@ -198,8 +198,8 @@ struct opdl_ring {
if (s->threadsafe == false)
s->available_seq = min_seq + wrap;
else
- __atomic_store_n(&s->shared.available_seq, min_seq + wrap,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.available_seq, min_seq + wrap,
+ rte_memory_order_release);
}
/* Wait until the number of available slots reaches number requested */
@@ -299,7 +299,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -382,18 +382,18 @@ struct opdl_ring {
/* There should be no race condition here. If shared.tail
* matches, no other core can update it until this one does.
*/
- if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&s->shared.tail, rte_memory_order_acquire) ==
tail) {
if (num_entries >= (head - tail)) {
claim_mgr_remove(disclaims);
- __atomic_store_n(&s->shared.tail, head,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, head,
+ rte_memory_order_release);
num_entries -= (head - tail);
} else {
claim_mgr_move_tail(disclaims, num_entries);
- __atomic_store_n(&s->shared.tail,
+ rte_atomic_store_explicit(&s->shared.tail,
num_entries + tail,
- __ATOMIC_RELEASE);
+ rte_memory_order_release);
num_entries = 0;
}
} else if (block == false)
@@ -421,7 +421,7 @@ struct opdl_ring {
opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
false);
- *old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+ *old_head = rte_atomic_load_explicit(&s->shared.head, rte_memory_order_acquire);
while (true) {
bool success;
/* If called by opdl_ring_input(), claim does not need to be
@@ -441,11 +441,10 @@ struct opdl_ring {
if (*num_entries == 0)
return;
- success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+ success = rte_atomic_compare_exchange_weak_explicit(&s->shared.head, old_head,
*old_head + *num_entries,
- true, /* may fail spuriously */
- __ATOMIC_RELEASE, /* memory order on success */
- __ATOMIC_ACQUIRE); /* memory order on fail */
+ rte_memory_order_release, /* memory order on success */
+ rte_memory_order_acquire); /* memory order on fail */
if (likely(success))
break;
rte_pause();
@@ -473,10 +472,11 @@ struct opdl_ring {
/* If another thread started inputting before this one, but hasn't
* finished, we need to wait for it to complete to update the tail.
*/
- rte_wait_until_equal_32(&s->shared.tail, old_head, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&s->shared.tail, old_head,
+ rte_memory_order_acquire);
- __atomic_store_n(&s->shared.tail, old_head + num_entries,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, old_head + num_entries,
+ rte_memory_order_release);
return num_entries;
}
@@ -526,8 +526,8 @@ struct opdl_ring {
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- event = __atomic_load_n(&(ev->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -628,8 +628,8 @@ struct opdl_ring {
num_entries, s->head - old_tail);
num_entries = s->head - old_tail;
}
- __atomic_store_n(&s->shared.tail, num_entries + old_tail,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, num_entries + old_tail,
+ rte_memory_order_release);
}
uint32_t
@@ -658,7 +658,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
@@ -677,7 +677,7 @@ struct opdl_ring {
copy_entries_out(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -756,7 +756,7 @@ struct opdl_ring {
return 0;
}
if (s->threadsafe == false) {
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
s->seq += s->num_claimed;
s->shadow_head = s->head;
s->num_claimed = 0;
@@ -1009,8 +1009,8 @@ struct opdl_ring *
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- event = __atomic_load_n(&(ev_orig->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev_orig->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -1027,9 +1027,9 @@ struct opdl_ring *
if ((event & OPDL_EVENT_MASK) !=
ev_temp) {
- __atomic_store_n(&(ev_orig->event),
- ev_update,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(
+ (uint64_t __rte_atomic *)&ev_orig->event,
+ ev_update, rte_memory_order_release);
ev_updated = true;
}
if (ev_orig->u64 != ev->u64) {
--
1.8.3.1
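The conversion from __atomic_compare_exchange_n() drops the explicit weak flag
because the weak/strong choice is encoded in the function name:
rte_atomic_compare_exchange_weak_explicit() may fail spuriously, so it belongs
in a retry loop, and it takes separate success and failure orderings just like
the builtin. A minimal claim-loop sketch with a hypothetical shared index,
assuming <rte_pause.h> and <rte_stdatomic.h>:

#include <rte_pause.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) demo_head;  /* hypothetical shared head index */

static uint32_t
demo_claim(uint32_t num)
{
        uint32_t old = rte_atomic_load_explicit(&demo_head,
                rte_memory_order_acquire);

        /* On failure the exchange reloads 'old' with acquire ordering for
         * the next attempt; on success release ordering publishes the claim. */
        while (!rte_atomic_compare_exchange_weak_explicit(&demo_head, &old,
                        old + num, rte_memory_order_release,
                        rte_memory_order_acquire))
                rte_pause();
        return old;
}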
* [PATCH v2 24/45] event/octeontx: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (22 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 23/45] event/opdl: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 25/45] event/dsw: " Tyler Retzlaff
` (20 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/octeontx/timvf_evdev.h | 8 ++++----
drivers/event/octeontx/timvf_worker.h | 36 +++++++++++++++++------------------
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index cef02cd..4bfc3d7 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -126,15 +126,15 @@ enum timvf_clk_src {
struct tim_mem_bucket {
uint64_t first_chunk;
union {
- uint64_t w1;
+ RTE_ATOMIC(uint64_t) w1;
struct {
- uint32_t nb_entry;
+ RTE_ATOMIC(uint32_t) nb_entry;
uint8_t sbt:1;
uint8_t hbt:1;
uint8_t bsk:1;
uint8_t rsvd:5;
- uint8_t lock;
- int16_t chunk_remainder;
+ RTE_ATOMIC(uint8_t) lock;
+ RTE_ATOMIC(int16_t) chunk_remainder;
};
};
uint64_t current_chunk;
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index e4b923e..de9f1b0 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -19,22 +19,22 @@
static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
- return __atomic_load_n(&bktp->chunk_remainder,
- __ATOMIC_ACQUIRE);
+ return rte_atomic_load_explicit(&bktp->chunk_remainder,
+ rte_memory_order_acquire);
}
static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_store_n(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_fetch_sub(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline uint8_t
@@ -47,14 +47,14 @@
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
- return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_or_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint8_t
@@ -81,34 +81,34 @@
{
/*Clear everything except lock. */
const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
- __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
- __ATOMIC_RELAXED);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
+ rte_memory_order_relaxed);
}
static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit(&bktp->lock, 0xff, rte_memory_order_acq_rel);
}
static inline uint32_t
@@ -121,13 +121,13 @@
static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
}
static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
- __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
}
static inline uint64_t
@@ -135,7 +135,7 @@
{
const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
TIM_BUCKET_W1_S_NUM_ENTRIES);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v;
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
}
static inline struct tim_mem_entry *
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
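The timvf hunks above show the two-part pattern used throughout this series: fields touched by atomic builtins are annotated with RTE_ATOMIC() so the typed rte_atomic_*_explicit() wrappers accept them, and each __atomic_* call is replaced by its _explicit counterpart taking an rte_memory_order_* argument. Below is a minimal compilable sketch of that pattern, using a hypothetical bucket type rather than the timvf structures; the 0xff add mirrors timr_bkt_dec_lock() above, where adding 255 to an 8-bit unsigned counter wraps modulo 256 and so subtracts 1.

	#include <stdint.h>
	#include <rte_stdatomic.h>

	/* Hypothetical example type; not part of the timvf driver. */
	struct example_bucket {
		RTE_ATOMIC(uint32_t) nb_entry; /* annotated so _explicit ops type-check */
		RTE_ATOMIC(uint8_t) lock;
	};

	static inline void
	example_inc_nent(struct example_bucket *b)
	{
		/* was: __atomic_fetch_add(&b->nb_entry, 1, __ATOMIC_RELAXED); */
		rte_atomic_fetch_add_explicit(&b->nb_entry, 1, rte_memory_order_relaxed);
	}

	static inline void
	example_dec_lock(struct example_bucket *b)
	{
		/* Adding 0xff to an 8-bit counter wraps around, i.e. decrements it. */
		rte_atomic_fetch_add_explicit(&b->lock, 0xff, rte_memory_order_acq_rel);
	}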
* [PATCH v2 25/45] event/dsw: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (23 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 24/45] event/octeontx: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 20:51 ` Mattias Rönnblom
2024-03-21 19:17 ` [PATCH v2 26/45] dma/skeleton: " Tyler Retzlaff
` (19 subsequent siblings)
44 siblings, 1 reply; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/dsw/dsw_evdev.h | 6 +++---
drivers/event/dsw/dsw_event.c | 34 +++++++++++++++++-----------------
drivers/event/dsw/dsw_xstats.c | 4 ++--
3 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index d745c89..20431d2 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -227,9 +227,9 @@ struct dsw_port {
struct rte_ring *ctl_in_ring __rte_cache_aligned;
/* Estimate of current port load. */
- int16_t load __rte_cache_aligned;
+ RTE_ATOMIC(int16_t) load __rte_cache_aligned;
/* Estimate of flows currently migrating to this port. */
- int32_t immigration_load __rte_cache_aligned;
+ RTE_ATOMIC(int32_t) immigration_load __rte_cache_aligned;
} __rte_cache_aligned;
struct dsw_queue {
@@ -252,7 +252,7 @@ struct dsw_evdev {
uint8_t num_queues;
int32_t max_inflight;
- int32_t credits_on_loan __rte_cache_aligned;
+ RTE_ATOMIC(int32_t) credits_on_loan __rte_cache_aligned;
};
#define DSW_CTL_PAUS_REQ (0)
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 23488d9..6c17b44 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -33,7 +33,7 @@
}
total_on_loan =
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
available = dsw->max_inflight - total_on_loan;
acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
@@ -45,13 +45,13 @@
* allocation.
*/
new_total_on_loan =
- __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED) + acquired_credits;
+ rte_atomic_fetch_add_explicit(&dsw->credits_on_loan, acquired_credits,
+ rte_memory_order_relaxed) + acquired_credits;
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
- __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, acquired_credits,
+ rte_memory_order_relaxed);
return false;
}
@@ -77,8 +77,8 @@
port->inflight_credits = leave_credits;
- __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, return_credits,
+ rte_memory_order_relaxed);
DSW_LOG_DP_PORT(DEBUG, port->id,
"Returned %d tokens to pool.\n",
@@ -156,19 +156,19 @@
int16_t period_load;
int16_t new_load;
- old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
+ old_load = rte_atomic_load_explicit(&port->load, rte_memory_order_relaxed);
period_load = dsw_port_load_close_period(port, now);
new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
(DSW_OLD_LOAD_WEIGHT+1);
- __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->load, new_load, rte_memory_order_relaxed);
/* The load of the recently immigrated flows should hopefully
* be reflected the load estimate by now.
*/
- __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->immigration_load, 0, rte_memory_order_relaxed);
}
static void
@@ -390,10 +390,10 @@ struct dsw_queue_flow_burst {
for (i = 0; i < dsw->num_ports; i++) {
int16_t measured_load =
- __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].load, rte_memory_order_relaxed);
int32_t immigration_load =
- __atomic_load_n(&dsw->ports[i].immigration_load,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
+ rte_memory_order_relaxed);
int32_t load = measured_load + immigration_load;
load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -523,8 +523,8 @@ struct dsw_queue_flow_burst {
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
- __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
- candidate_flow_load, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&dsw->ports[candidate_port_id].immigration_load,
+ candidate_flow_load, rte_memory_order_relaxed);
return true;
}
@@ -882,7 +882,7 @@ struct dsw_queue_flow_burst {
}
source_port_load =
- __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&source_port->load, rte_memory_order_relaxed);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Load %d is below threshold level %d.\n",
@@ -1301,7 +1301,7 @@ struct dsw_queue_flow_burst {
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
+ rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed) >
source_port->new_event_threshold))
return 0;
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index 2a83a28..f61dfd8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -48,7 +48,7 @@ struct dsw_xstats_port {
static uint64_t
dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
{
- return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
}
static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ struct dsw_xstats_port {
{
int16_t load;
- load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
+ load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
return DSW_LOAD_TO_PERCENT(load);
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
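The credits_on_loan handling in the dsw patch above is an optimistic reservation: a port adds its request with a relaxed fetch-add, checks the resulting total against max_inflight, and rolls the addition back if the pool was overdrawn. A minimal sketch of that pattern with hypothetical names (not the dsw code itself):

	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_stdatomic.h>

	static RTE_ATOMIC(int32_t) credits_on_loan; /* hypothetical shared pool counter */

	static bool
	example_acquire_credits(int32_t want, int32_t max_inflight)
	{
		int32_t new_total;

		/* Optimistically take the credits; fetch_add returns the old value. */
		new_total = rte_atomic_fetch_add_explicit(&credits_on_loan, want,
							  rte_memory_order_relaxed) + want;
		if (new_total > max_inflight) {
			/* Pool exhausted: give the credits back and report failure. */
			rte_atomic_fetch_sub_explicit(&credits_on_loan, want,
						      rte_memory_order_relaxed);
			return false;
		}
		return true;
	}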
* Re: [PATCH v2 25/45] event/dsw: use rte stdatomic API
2024-03-21 19:17 ` [PATCH v2 25/45] event/dsw: " Tyler Retzlaff
@ 2024-03-21 20:51 ` Mattias Rönnblom
0 siblings, 0 replies; 300+ messages in thread
From: Mattias Rönnblom @ 2024-03-21 20:51 UTC (permalink / raw)
To: Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Ziyang Xuan
On 2024-03-21 20:17, Tyler Retzlaff wrote:
> Replace the use of gcc builtin __atomic_xxx intrinsics with
> corresponding rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> ---
> drivers/event/dsw/dsw_evdev.h | 6 +++---
> drivers/event/dsw/dsw_event.c | 34 +++++++++++++++++-----------------
> drivers/event/dsw/dsw_xstats.c | 4 ++--
> 3 files changed, 22 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
> index d745c89..20431d2 100644
> --- a/drivers/event/dsw/dsw_evdev.h
> +++ b/drivers/event/dsw/dsw_evdev.h
> @@ -227,9 +227,9 @@ struct dsw_port {
> struct rte_ring *ctl_in_ring __rte_cache_aligned;
>
> /* Estimate of current port load. */
> - int16_t load __rte_cache_aligned;
> + RTE_ATOMIC(int16_t) load __rte_cache_aligned;
> /* Estimate of flows currently migrating to this port. */
> - int32_t immigration_load __rte_cache_aligned;
> + RTE_ATOMIC(int32_t) immigration_load __rte_cache_aligned;
> } __rte_cache_aligned;
>
> struct dsw_queue {
> @@ -252,7 +252,7 @@ struct dsw_evdev {
> uint8_t num_queues;
> int32_t max_inflight;
>
> - int32_t credits_on_loan __rte_cache_aligned;
> + RTE_ATOMIC(int32_t) credits_on_loan __rte_cache_aligned;
> };
>
> #define DSW_CTL_PAUS_REQ (0)
> diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
> index 23488d9..6c17b44 100644
> --- a/drivers/event/dsw/dsw_event.c
> +++ b/drivers/event/dsw/dsw_event.c
> @@ -33,7 +33,7 @@
> }
>
> total_on_loan =
> - __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
> + rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
Limit lines to 80 characters, like in the rest of this file.
> available = dsw->max_inflight - total_on_loan;
> acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
>
> @@ -45,13 +45,13 @@
> * allocation.
> */
> new_total_on_loan =
> - __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
> - __ATOMIC_RELAXED) + acquired_credits;
> + rte_atomic_fetch_add_explicit(&dsw->credits_on_loan, acquired_credits,
> + rte_memory_order_relaxed) + acquired_credits;
Format left-over arguments in the same way it's done in the rest of this
file.
Several other changes below suffer from the above two issues.
Provided the formatting is fixed,
Reviewed-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
>
> if (unlikely(new_total_on_loan > dsw->max_inflight)) {
> /* Some other port took the last credits */
> - __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
> - __ATOMIC_RELAXED);
> + rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, acquired_credits,
> + rte_memory_order_relaxed);
> return false;
> }
>
> @@ -77,8 +77,8 @@
>
> port->inflight_credits = leave_credits;
>
> - __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
> - __ATOMIC_RELAXED);
> + rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, return_credits,
> + rte_memory_order_relaxed);
>
> DSW_LOG_DP_PORT(DEBUG, port->id,
> "Returned %d tokens to pool.\n",
> @@ -156,19 +156,19 @@
> int16_t period_load;
> int16_t new_load;
>
> - old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
> + old_load = rte_atomic_load_explicit(&port->load, rte_memory_order_relaxed);
>
> period_load = dsw_port_load_close_period(port, now);
>
> new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
> (DSW_OLD_LOAD_WEIGHT+1);
>
> - __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
> + rte_atomic_store_explicit(&port->load, new_load, rte_memory_order_relaxed);
>
> /* The load of the recently immigrated flows should hopefully
> * be reflected the load estimate by now.
> */
> - __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
> + rte_atomic_store_explicit(&port->immigration_load, 0, rte_memory_order_relaxed);
> }
>
> static void
> @@ -390,10 +390,10 @@ struct dsw_queue_flow_burst {
>
> for (i = 0; i < dsw->num_ports; i++) {
> int16_t measured_load =
> - __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
> + rte_atomic_load_explicit(&dsw->ports[i].load, rte_memory_order_relaxed);
> int32_t immigration_load =
> - __atomic_load_n(&dsw->ports[i].immigration_load,
> - __ATOMIC_RELAXED);
> + rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
> + rte_memory_order_relaxed);
> int32_t load = measured_load + immigration_load;
>
> load = RTE_MIN(load, DSW_MAX_LOAD);
> @@ -523,8 +523,8 @@ struct dsw_queue_flow_burst {
> target_qfs[*targets_len] = *candidate_qf;
> (*targets_len)++;
>
> - __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
> - candidate_flow_load, __ATOMIC_RELAXED);
> + rte_atomic_fetch_add_explicit(&dsw->ports[candidate_port_id].immigration_load,
> + candidate_flow_load, rte_memory_order_relaxed);
>
> return true;
> }
> @@ -882,7 +882,7 @@ struct dsw_queue_flow_burst {
> }
>
> source_port_load =
> - __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
> + rte_atomic_load_explicit(&source_port->load, rte_memory_order_relaxed);
> if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
> DSW_LOG_DP_PORT(DEBUG, source_port->id,
> "Load %d is below threshold level %d.\n",
> @@ -1301,7 +1301,7 @@ struct dsw_queue_flow_burst {
> * above the water mark.
> */
> if (unlikely(num_new > 0 &&
> - __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
> + rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed) >
> source_port->new_event_threshold))
> return 0;
>
> diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
> index 2a83a28..f61dfd8 100644
> --- a/drivers/event/dsw/dsw_xstats.c
> +++ b/drivers/event/dsw/dsw_xstats.c
> @@ -48,7 +48,7 @@ struct dsw_xstats_port {
> static uint64_t
> dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
> {
> - return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
> + return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
> }
>
> static struct dsw_xstat_dev dsw_dev_xstats[] = {
> @@ -126,7 +126,7 @@ struct dsw_xstats_port {
> {
> int16_t load;
>
> - load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
> + load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
>
> return DSW_LOAD_TO_PERCENT(load);
> }
^ permalink raw reply [flat|nested] 300+ messages in thread
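The two formatting remarks in the review above ask for lines kept within 80 columns and continuation arguments aligned as elsewhere in dsw_event.c. One possible rewrap of the flagged statement, shown only as an illustration since the exact alignment is the maintainer's choice:

	new_total_on_loan =
		rte_atomic_fetch_add_explicit(&dsw->credits_on_loan,
					      acquired_credits,
					      rte_memory_order_relaxed) +
		acquired_credits;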
* [PATCH v2 26/45] dma/skeleton: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (24 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 25/45] event/dsw: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 27/45] crypto/octeontx: " Tyler Retzlaff
` (18 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/dma/skeleton/skeleton_dmadev.c | 5 +++--
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index 48f88f9..926c188 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -142,7 +142,7 @@
else if (desc->op == SKELDMA_OP_FILL)
do_fill(desc);
- __atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&hw->completed_count, 1, rte_memory_order_release);
(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
}
@@ -335,7 +335,8 @@
RTE_SET_USED(vchan);
*status = RTE_DMA_VCHAN_IDLE;
- if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE)
+ if (hw->submitted_count != rte_atomic_load_explicit(&hw->completed_count,
+ rte_memory_order_acquire)
|| hw->zero_req_count == 0)
*status = RTE_DMA_VCHAN_ACTIVE;
return 0;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index c9bf315..3730cbc 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -81,7 +81,7 @@ struct skeldma_hw {
/* Cache delimiter for cpuwork thread's operation data */
char cache2 __rte_cache_aligned;
volatile uint32_t zero_req_count;
- uint64_t completed_count;
+ RTE_ATOMIC(uint64_t) completed_count;
};
#endif /* SKELETON_DMADEV_H */
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
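The skeleton driver above pairs a release increment of completed_count in the worker with an acquire load in the status query, so descriptor results written before the increment are visible to the caller that observes the new count. A minimal sketch of that producer/consumer pairing, with hypothetical names:

	#include <stdint.h>
	#include <rte_stdatomic.h>

	struct example_hw {
		uint64_t submitted_count;             /* written by the submitting thread */
		RTE_ATOMIC(uint64_t) completed_count; /* written by the worker thread */
	};

	/* Worker side: publish one completed descriptor. */
	static void
	example_complete_one(struct example_hw *hw)
	{
		/* Results written before this release are visible to acquire readers. */
		rte_atomic_fetch_add_explicit(&hw->completed_count, 1,
					      rte_memory_order_release);
	}

	/* Query side: the channel is idle once completions have caught up. */
	static int
	example_is_idle(struct example_hw *hw)
	{
		return hw->submitted_count ==
		       rte_atomic_load_explicit(&hw->completed_count,
						rte_memory_order_acquire);
	}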
* [PATCH v2 27/45] crypto/octeontx: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (25 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 26/45] dma/skeleton: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 28/45] common/mlx5: " Tyler Retzlaff
` (17 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 947e1be..bafd0c1 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -652,7 +652,7 @@
if (!rsp_info->sched_type)
ssows_head_wait(ws);
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
}
@@ -896,7 +896,7 @@
pcount = pending_queue_level(pqueue, DEFAULT_CMD_QLEN);
/* Ensure pcount isn't read before data lands */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
count = (nb_ops > pcount) ? pcount : nb_ops;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
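The octeontx crypto change above only touches standalone fences: a release fence before handing work to hardware and an acquire fence before trusting a count read from a shared queue. Below is a minimal sketch of that fence-around-plain-accesses style, with hypothetical names; it mirrors the driver's approach of ordering ordinary and volatile accesses with rte_atomic_thread_fence() rather than making each access atomic, and is not the octeontx code.

	#include <stdint.h>
	#include <rte_atomic.h>
	#include <rte_stdatomic.h>

	static uint64_t example_payload;
	static volatile uint32_t example_ready;

	/* Producer: make the payload visible before signalling readiness. */
	static void
	example_publish(uint64_t value)
	{
		example_payload = value;
		rte_atomic_thread_fence(rte_memory_order_release);
		example_ready = 1;
	}

	/* Consumer: only read the payload after seeing the ready flag. */
	static int
	example_consume(uint64_t *value)
	{
		if (!example_ready)
			return 0;
		rte_atomic_thread_fence(rte_memory_order_acquire);
		*value = example_payload;
		return 1;
	}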
* [PATCH v2 28/45] common/mlx5: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (26 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 27/45] crypto/octeontx: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 29/45] common/idpf: " Tyler Retzlaff
` (16 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/common/mlx5/linux/mlx5_nl.c | 5 +--
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 ++++-----
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 +++++++++---------
drivers/common/mlx5/mlx5_common_utils.h | 6 ++--
drivers/common/mlx5/mlx5_malloc.c | 58 ++++++++++++++++-----------------
7 files changed, 61 insertions(+), 60 deletions(-)
diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 28a1f56..bf6dd19 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -175,10 +175,11 @@ struct mlx5_nl_port_info {
uint16_t state; /**< IB device port state (out). */
};
-uint32_t atomic_sn;
+RTE_ATOMIC(uint32_t) atomic_sn;
/* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
+#define MLX5_NL_SN_GENERATE (rte_atomic_fetch_add_explicit(&atomic_sn, 1, \
+ rte_memory_order_relaxed) + 1)
/**
* Opens a Netlink socket.
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 9c80277..14c70ed 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -195,7 +195,7 @@ enum mlx5_cqe_status {
/* Prevent speculative reading of other fields in CQE until
* CQE is valid.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
op_code == MLX5_CQE_REQ_ERR))
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 85ec10d..50922ad 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -35,7 +35,7 @@ struct mlx5_range {
/** Memory region for a mempool. */
struct mlx5_mempool_mr {
struct mlx5_pmd_mr pmd_mr;
- uint32_t refcnt; /**< Number of mempools sharing this MR. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
};
/* Mempool registration. */
@@ -56,11 +56,11 @@ struct mlx5_mempool_reg {
{
struct mlx5_mprq_buf *buf = opaque;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
rte_mempool_put(buf->mp, buf);
- } else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0)) {
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ } else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0)) {
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
rte_mempool_put(buf->mp, buf);
}
}
@@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {
unsigned int i;
for (i = 0; i < mpr->mrs_n; i++)
- __atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {
bool ret = false;
for (i = 0; i < mpr->mrs_n; i++)
- ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0;
+ ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0;
return ret;
}
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index 8789d40..5bdf48a 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {
/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
struct rte_mempool *mp;
- uint16_t refcnt; /* Atomically accessed refcnt. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
struct rte_mbuf_ext_shared_info shinfos[];
/*
* Shared information per stride.
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index e69d068..4b95d35 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -81,14 +81,14 @@ struct mlx5_list *
while (entry != NULL) {
if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
if (reuse) {
- ret = __atomic_fetch_add(&entry->ref_cnt, 1,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
+ rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
l_const->name, (void *)entry,
entry->ref_cnt);
} else if (lcore_index < MLX5_LIST_GLOBAL) {
- ret = __atomic_load_n(&entry->ref_cnt,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_load_explicit(&entry->ref_cnt,
+ rte_memory_order_relaxed);
}
if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
return entry;
@@ -151,13 +151,13 @@ struct mlx5_list_entry *
{
struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
- uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
- __ATOMIC_RELAXED);
+ uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
+ rte_memory_order_relaxed);
while (inv_cnt != 0 && entry != NULL) {
struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
- if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
LIST_REMOVE(entry, next);
if (l_const->lcores_share)
l_const->cb_clone_free(l_const->ctx, entry);
@@ -217,7 +217,7 @@ struct mlx5_list_entry *
entry->lcore_idx = (uint32_t)lcore_index;
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
l_const->name, lcore_index,
(void *)entry, entry->ref_cnt);
@@ -254,7 +254,7 @@ struct mlx5_list_entry *
l_inconst->gen_cnt++;
rte_rwlock_write_unlock(&l_inconst->lock);
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
(void *)entry, entry->ref_cnt);
return local_entry;
@@ -285,7 +285,7 @@ struct mlx5_list_entry *
{
struct mlx5_list_entry *gentry = entry->gentry;
- if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
if (entry->lcore_idx == (uint32_t)lcore_idx) {
LIST_REMOVE(entry, next);
@@ -294,23 +294,23 @@ struct mlx5_list_entry *
else
l_const->cb_remove(l_const->ctx, entry);
} else {
- __atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+ 1, rte_memory_order_relaxed);
}
if (!l_const->lcores_share) {
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)entry);
return 0;
}
- if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
rte_rwlock_write_lock(&l_inconst->lock);
if (likely(gentry->ref_cnt == 0)) {
LIST_REMOVE(gentry, next);
rte_rwlock_write_unlock(&l_inconst->lock);
l_const->cb_remove(l_const->ctx, gentry);
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)gentry);
return 0;
@@ -377,7 +377,7 @@ struct mlx5_list_entry *
mlx5_list_get_entry_num(struct mlx5_list *list)
{
MLX5_ASSERT(list);
- return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
}
/********************* Hash List **********************/
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index ae15119..cb4d104 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -29,7 +29,7 @@
*/
struct mlx5_list_entry {
LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
- uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
+ RTE_ATOMIC(uint32_t) ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
uint32_t lcore_idx;
union {
struct mlx5_list_entry *gentry;
@@ -39,7 +39,7 @@ struct mlx5_list_entry {
struct mlx5_list_cache {
LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
- uint32_t inv_cnt; /* Invalid entries counter. */
+ RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
} __rte_cache_aligned;
/**
@@ -111,7 +111,7 @@ struct mlx5_list_const {
struct mlx5_list_inconst {
rte_rwlock_t lock; /* read/write lock. */
volatile uint32_t gen_cnt; /* List modification may update it. */
- volatile uint32_t count; /* number of entries in list. */
+ volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
struct mlx5_list_cache *cache[MLX5_LIST_MAX];
/* Lcore cache, last index is the global cache. */
};
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index c58c41d..ef6dabe 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -16,7 +16,7 @@ struct mlx5_sys_mem {
uint32_t init:1; /* Memory allocator initialized. */
uint32_t enable:1; /* System memory select. */
uint32_t reserve:30; /* Reserve. */
- struct rte_memseg_list *last_msl;
+ RTE_ATOMIC(struct rte_memseg_list *) last_msl;
/* last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
uint64_t malloc_sys;
@@ -93,14 +93,14 @@ struct mlx5_sys_mem {
* different with the cached msl.
*/
if (addr && !mlx5_mem_check_msl(addr,
- (struct rte_memseg_list *)__atomic_load_n
- (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
- __atomic_store_n(&mlx5_sys_mem.last_msl,
+ (struct rte_memseg_list *)rte_atomic_load_explicit
+ (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
+ rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
rte_mem_virt2memseg_list(addr),
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
+ rte_memory_order_relaxed);
#endif
}
}
@@ -122,11 +122,11 @@ struct mlx5_sys_mem {
* to check if the memory belongs to rte memory.
*/
if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
- __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+ rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
if (!rte_mem_virt2memseg_list(addr))
return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
#endif
}
return true;
@@ -185,8 +185,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -199,8 +199,8 @@ struct mlx5_sys_mem {
addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -233,8 +233,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -246,8 +246,8 @@ struct mlx5_sys_mem {
new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -259,14 +259,14 @@ struct mlx5_sys_mem {
return;
if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
+ rte_memory_order_relaxed);
#endif
mlx5_os_free(addr);
} else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
+ rte_memory_order_relaxed);
#endif
rte_free(addr);
}
@@ -280,14 +280,14 @@ struct mlx5_sys_mem {
" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
" update:%"PRIi64"",
- __atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
#endif
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
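A recurring idiom in the mlx5 hunks above is using the value returned by the fetch-sub to detect release of the last reference: the _explicit wrappers, like the builtins, return the value before the operation, so "old - 1 == 0" means this caller dropped the final reference. A minimal sketch of that reference-count pattern, keeping the relaxed ordering the driver uses, with hypothetical names:

	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_stdatomic.h>

	struct example_obj {
		RTE_ATOMIC(uint32_t) refcnt;
	};

	static void
	example_obj_get(struct example_obj *obj)
	{
		rte_atomic_fetch_add_explicit(&obj->refcnt, 1, rte_memory_order_relaxed);
	}

	/* Returns true when the caller released the last reference. */
	static bool
	example_obj_put(struct example_obj *obj)
	{
		/* fetch_sub returns the value held before the decrement. */
		return rte_atomic_fetch_sub_explicit(&obj->refcnt, 1,
						     rte_memory_order_relaxed) - 1 == 0;
	}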
* [PATCH v2 29/45] common/idpf: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (27 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 28/45] common/mlx5: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 30/45] common/iavf: " Tyler Retzlaff
` (15 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/common/idpf/idpf_common_device.h | 6 +++---
drivers/common/idpf/idpf_common_rxtx.c | 14 ++++++++------
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 ++++++++--------
4 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 2b94f03..6a44cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -48,7 +48,7 @@ struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
struct virtchnl2_get_capabilities caps;
- volatile uint32_t pend_cmd; /* pending command not finished */
+ volatile RTE_ATOMIC(uint32_t) pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from cp */
uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
@@ -179,8 +179,8 @@ struct idpf_cmd_info {
atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
{
uint32_t op_unk = VIRTCHNL2_OP_UNKNOWN;
- bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ bool ret = rte_atomic_compare_exchange_strong_explicit(&adapter->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 83b131e..b09c58c 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -592,8 +592,8 @@
next_avail = 0;
rx_bufq->nb_rx_hold -= delta;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
return;
@@ -612,8 +612,8 @@
next_avail += nb_refill;
rx_bufq->nb_rx_hold -= nb_refill;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
}
@@ -1093,7 +1093,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(nmb == NULL)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -1203,7 +1204,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index b49b1ed..eeeeed1 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -97,7 +97,7 @@
#define IDPF_RX_SPLIT_BUFQ2_ID 2
struct idpf_rx_stats {
- uint64_t mbuf_alloc_failed;
+ RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
struct idpf_rx_queue {
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index f65e8d5..3b5e124 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -38,8 +38,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -168,8 +168,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
@@ -564,8 +564,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
@@ -638,8 +638,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
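Besides the refill-counter changes, the idpf patch above switches from the pointer-to-desired form of __atomic_compare_exchange to rte_atomic_compare_exchange_strong_explicit, which takes the desired value directly and, like the builtin, writes the observed value back into the expected variable on failure. A minimal sketch of that claim-if-idle pattern, with hypothetical names rather than the idpf adapter structure:

	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_stdatomic.h>

	#define EXAMPLE_OP_NONE 0u

	static RTE_ATOMIC(uint32_t) example_pend_cmd; /* hypothetical pending-command slot */

	/* Claim the command slot only if no command is currently pending. */
	static bool
	example_set_cmd(uint32_t ops)
	{
		uint32_t expected = EXAMPLE_OP_NONE;

		return rte_atomic_compare_exchange_strong_explicit(&example_pend_cmd,
								   &expected, ops,
								   rte_memory_order_acquire,
								   rte_memory_order_acquire);
	}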
* [PATCH v2 30/45] common/iavf: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (28 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 29/45] common/idpf: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 31/45] baseband/acc: " Tyler Retzlaff
` (14 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/common/iavf/iavf_impl.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
index 8919b0e..c0ff301 100644
--- a/drivers/common/iavf/iavf_impl.c
+++ b/drivers/common/iavf/iavf_impl.c
@@ -18,7 +18,7 @@ enum iavf_status
u64 size,
u32 alignment)
{
- static uint64_t iavf_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) iavf_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -26,7 +26,7 @@ enum iavf_status
return IAVF_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "iavf_dma_%" PRIu64,
- __atomic_fetch_add(&iavf_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&iavf_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
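The iavf hunk above uses a relaxed fetch-add on a static counter purely to hand out unique memzone name suffixes; no ordering with other data is required, only atomicity of the increment itself. A minimal sketch, with hypothetical names:

	#include <stdint.h>
	#include <rte_stdatomic.h>

	static RTE_ATOMIC(uint64_t) example_id_counter;

	/* Returns a process-unique id; relaxed ordering suffices for a pure counter. */
	static uint64_t
	example_next_id(void)
	{
		return rte_atomic_fetch_add_explicit(&example_id_counter, 1,
						     rte_memory_order_relaxed);
	}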
* [PATCH v2 31/45] baseband/acc: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (29 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 30/45] common/iavf: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 32/45] net/txgbe: " Tyler Retzlaff
` (13 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 34 deletions(-)
diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
index 4f666e5..ee50b9c 100644
--- a/drivers/baseband/acc/rte_acc100_pmd.c
+++ b/drivers/baseband/acc/rte_acc100_pmd.c
@@ -3673,8 +3673,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3728,8 +3728,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3742,8 +3742,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3755,8 +3755,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n",
desc, rsp.val, descs_in_tb, desc->req.numCBs);
@@ -3793,8 +3793,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3846,8 +3846,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3902,8 +3902,8 @@
uint8_t cbs_in_tb = 1, cb_idx = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3919,8 +3919,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3930,8 +3930,8 @@
/* Read remaining CBs if exists */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
desc, rsp.val, cb_idx, cbs_in_tb);
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 88b1104..f7c54be 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -3119,7 +3119,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
return -1;
@@ -3157,7 +3158,8 @@
struct rte_bbdev_enc_op *op;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3192,7 +3194,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + 1 > max_requested_ops)
return -1;
@@ -3208,7 +3211,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3220,7 +3224,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
@@ -3246,7 +3251,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3290,7 +3296,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3346,7 +3353,8 @@
uint32_t tb_crc_check = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3362,7 +3370,8 @@
/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.
* If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3372,7 +3381,8 @@
/* Read remaining CBs if exists. */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
rsp.val, desc->rsp.add_info_0,
@@ -3790,7 +3800,8 @@
struct rte_bbdev_fft_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4116,7 +4127,8 @@
uint8_t descs_in_op, i;
desc = acc_desc_tail(q, dequeued_ops);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4127,7 +4139,8 @@
/* Get last CB. */
last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1);
/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -4137,8 +4150,8 @@
for (i = 1; i < descs_in_op - 1; i++) {
last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
& q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit(
+ (uint64_t __rte_atomic *)last_desc, rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
}
@@ -4154,7 +4167,8 @@
for (i = 0; i < descs_in_op; i++) {
desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v2 32/45] net/txgbe: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (30 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 31/45] baseband/acc: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 33/45] net/null: " Tyler Retzlaff
` (12 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 12 +++++++-----
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
3 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b75e889..a58f197 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -595,7 +595,7 @@ static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -2834,7 +2834,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev);
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -2859,7 +2859,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
rte_thread_detach(rte_thread_self());
txgbe_dev_setup_link_alarm_handler(dev);
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -2908,7 +2908,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
} else if (hw->phy.media_type == txgbe_media_type_fiber &&
dev->data->dev_conf.intr_conf.lsc != 0) {
txgbe_dev_wait_setup_link_complete(dev, 0);
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the TXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -2918,7 +2919,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
"txgbe-link",
txgbe_dev_setup_link_thread_handler, dev) < 0) {
PMD_DRV_LOG(ERR, "Create link thread failed!");
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 7e8067c..e8f55f7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -372,7 +372,7 @@ struct txgbe_adapter {
/* For RSS reta table update */
uint8_t rss_reta_updated;
- uint32_t link_thread_running;
+ RTE_ATOMIC(uint32_t) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index f1341fb..1abc190 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -206,7 +206,7 @@ static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->device_id = pci_dev->id.device_id;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v2 33/45] net/null: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (31 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 32/45] net/txgbe: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 34/45] event/dlb2: " Tyler Retzlaff
` (11 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/net/null/rte_eth_null.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 7c46004..f4ed3b8 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -37,8 +37,8 @@ struct null_queue {
struct rte_mempool *mb_pool;
struct rte_mbuf *dummy_packet;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_options {
@@ -102,7 +102,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -130,7 +130,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -155,7 +155,7 @@ struct pmd_internals {
rte_pktmbuf_free(bufs[i]);
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -178,7 +178,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v2 34/45] event/dlb2: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (32 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 33/45] net/null: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 21:03 ` Mattias Rönnblom
2024-03-21 19:17 ` [PATCH v2 35/45] dma/idxd: " Tyler Retzlaff
` (10 subsequent siblings)
44 siblings, 1 reply; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/event/dlb2/dlb2.c | 34 +++++++++++++++++-----------------
drivers/event/dlb2/dlb2_priv.h | 10 +++++-----
drivers/event/dlb2/dlb2_xstats.c | 2 +-
3 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 628ddef..0b91f03 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1005,7 +1005,7 @@ struct process_local_port_data
}
dlb2->new_event_limit = config->nb_events_limit;
- __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&dlb2->inflights, 0, rte_memory_order_seq_cst);
/* Save number of ports/queues for this event dev */
dlb2->num_ports = config->nb_event_ports;
@@ -2668,10 +2668,10 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
batch_size = credits;
if (likely(credits &&
- __atomic_compare_exchange_n(
+ rte_atomic_compare_exchange_strong_explicit(
qm_port->credit_pool[type],
- &credits, credits - batch_size, false,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+ &credits, credits - batch_size,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst)))
return batch_size;
else
return 0;
@@ -2687,7 +2687,7 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
/* Replenish credits, saving one quanta for enqueues */
uint16_t val = ev_port->inflight_credits - quanta;
- __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
ev_port->inflight_credits -= val;
}
}
@@ -2696,8 +2696,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_port *ev_port)
{
- uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
- __ATOMIC_SEQ_CST);
+ uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
+ rte_memory_order_seq_cst);
const int num = 1;
if (unlikely(ev_port->inflight_max < sw_inflights)) {
@@ -2719,8 +2719,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
return 1;
}
- __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
+ rte_memory_order_seq_cst);
ev_port->inflight_credits += (credit_update_quanta);
if (ev_port->inflight_credits < num) {
@@ -3234,17 +3234,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_ldb_credits += num;
if (qm_port->cached_ldb_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_LDB_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_ldb_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
@@ -3252,17 +3252,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_dir_credits += num;
if (qm_port->cached_dir_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_DIR_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_dir_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index 31a3bee..46883f2 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -348,7 +348,7 @@ struct dlb2_port {
uint32_t dequeue_depth;
enum dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
- uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) *credit_pool[DLB2_NUM_QUEUE_TYPES];
union {
struct {
uint16_t cached_ldb_credits;
@@ -586,7 +586,7 @@ struct dlb2_eventdev {
uint32_t xstats_count_mode_dev;
uint32_t xstats_count_mode_port;
uint32_t xstats_count;
- uint32_t inflights; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) inflights; /* use __atomic builtins */
uint32_t new_event_limit;
int max_num_events_override;
int num_dir_credits_override;
@@ -624,14 +624,14 @@ struct dlb2_eventdev {
uint16_t max_ldb_credits;
uint16_t max_dir_credits;
/* use __atomic builtins */ /* shared hw cred */
- uint32_t ldb_credit_pool __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) ldb_credit_pool __rte_cache_aligned;
/* use __atomic builtins */ /* shared hw cred */
- uint32_t dir_credit_pool __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) dir_credit_pool __rte_cache_aligned;
};
struct {
uint16_t max_credits;
/* use __atomic builtins */ /* shared hw cred */
- uint32_t credit_pool __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) credit_pool __rte_cache_aligned;
};
};
uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index ff15271..22094f3 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -173,7 +173,7 @@ struct dlb2_xstats_entry {
case nb_events_limit:
return dlb2->new_event_limit;
case inflight_events:
- return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
+ return rte_atomic_load_explicit(&dlb2->inflights, rte_memory_order_seq_cst);
case ldb_pool_size:
return dlb2->num_ldb_credits;
case dir_pool_size:
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* Re: [PATCH v2 34/45] event/dlb2: use rte stdatomic API
2024-03-21 19:17 ` [PATCH v2 34/45] event/dlb2: " Tyler Retzlaff
@ 2024-03-21 21:03 ` Mattias Rönnblom
2024-04-09 19:31 ` Sevincer, Abdullah
0 siblings, 1 reply; 300+ messages in thread
From: Mattias Rönnblom @ 2024-03-21 21:03 UTC (permalink / raw)
To: Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Ziyang Xuan
On 2024-03-21 20:17, Tyler Retzlaff wrote:
> Replace the use of gcc builtin __atomic_xxx intrinsics with
> corresponding rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> ---
> drivers/event/dlb2/dlb2.c | 34 +++++++++++++++++-----------------
> drivers/event/dlb2/dlb2_priv.h | 10 +++++-----
> drivers/event/dlb2/dlb2_xstats.c | 2 +-
> 3 files changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
> index 628ddef..0b91f03 100644
> --- a/drivers/event/dlb2/dlb2.c
> +++ b/drivers/event/dlb2/dlb2.c
> @@ -1005,7 +1005,7 @@ struct process_local_port_data
> }
>
> dlb2->new_event_limit = config->nb_events_limit;
> - __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
> + rte_atomic_store_explicit(&dlb2->inflights, 0, rte_memory_order_seq_cst);
>
> /* Save number of ports/queues for this event dev */
> dlb2->num_ports = config->nb_event_ports;
> @@ -2668,10 +2668,10 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
> batch_size = credits;
>
> if (likely(credits &&
> - __atomic_compare_exchange_n(
> + rte_atomic_compare_exchange_strong_explicit(
> qm_port->credit_pool[type],
> - &credits, credits - batch_size, false,
> - __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
> + &credits, credits - batch_size,
> + rte_memory_order_seq_cst, rte_memory_order_seq_cst)))
> return batch_size;
> else
> return 0;
> @@ -2687,7 +2687,7 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
> /* Replenish credits, saving one quanta for enqueues */
> uint16_t val = ev_port->inflight_credits - quanta;
>
> - __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
> + rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
> ev_port->inflight_credits -= val;
> }
> }
> @@ -2696,8 +2696,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
> dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
> struct dlb2_eventdev_port *ev_port)
> {
> - uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
> - __ATOMIC_SEQ_CST);
> + uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
> + rte_memory_order_seq_cst);
> const int num = 1;
>
> if (unlikely(ev_port->inflight_max < sw_inflights)) {
> @@ -2719,8 +2719,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
> return 1;
> }
>
> - __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
> - __ATOMIC_SEQ_CST);
> + rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
> + rte_memory_order_seq_cst);
> ev_port->inflight_credits += (credit_update_quanta);
>
> if (ev_port->inflight_credits < num) {
> @@ -3234,17 +3234,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
> if (qm_port->dlb2->version == DLB2_HW_V2) {
> qm_port->cached_ldb_credits += num;
> if (qm_port->cached_ldb_credits >= 2 * batch_size) {
> - __atomic_fetch_add(
> + rte_atomic_fetch_add_explicit(
> qm_port->credit_pool[DLB2_LDB_QUEUE],
> - batch_size, __ATOMIC_SEQ_CST);
> + batch_size, rte_memory_order_seq_cst);
> qm_port->cached_ldb_credits -= batch_size;
> }
> } else {
> qm_port->cached_credits += num;
> if (qm_port->cached_credits >= 2 * batch_size) {
> - __atomic_fetch_add(
> + rte_atomic_fetch_add_explicit(
> qm_port->credit_pool[DLB2_COMBINED_POOL],
> - batch_size, __ATOMIC_SEQ_CST);
> + batch_size, rte_memory_order_seq_cst);
> qm_port->cached_credits -= batch_size;
> }
> }
> @@ -3252,17 +3252,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
> if (qm_port->dlb2->version == DLB2_HW_V2) {
> qm_port->cached_dir_credits += num;
> if (qm_port->cached_dir_credits >= 2 * batch_size) {
> - __atomic_fetch_add(
> + rte_atomic_fetch_add_explicit(
> qm_port->credit_pool[DLB2_DIR_QUEUE],
> - batch_size, __ATOMIC_SEQ_CST);
> + batch_size, rte_memory_order_seq_cst);
> qm_port->cached_dir_credits -= batch_size;
> }
> } else {
> qm_port->cached_credits += num;
> if (qm_port->cached_credits >= 2 * batch_size) {
> - __atomic_fetch_add(
> + rte_atomic_fetch_add_explicit(
> qm_port->credit_pool[DLB2_COMBINED_POOL],
> - batch_size, __ATOMIC_SEQ_CST);
> + batch_size, rte_memory_order_seq_cst);
> qm_port->cached_credits -= batch_size;
> }
> }
> diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
> index 31a3bee..46883f2 100644
> --- a/drivers/event/dlb2/dlb2_priv.h
> +++ b/drivers/event/dlb2/dlb2_priv.h
> @@ -348,7 +348,7 @@ struct dlb2_port {
> uint32_t dequeue_depth;
> enum dlb2_token_pop_mode token_pop_mode;
> union dlb2_port_config cfg;
> - uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
> + RTE_ATOMIC(uint32_t) *credit_pool[DLB2_NUM_QUEUE_TYPES];
> union {
> struct {
> uint16_t cached_ldb_credits;
> @@ -586,7 +586,7 @@ struct dlb2_eventdev {
> uint32_t xstats_count_mode_dev;
> uint32_t xstats_count_mode_port;
> uint32_t xstats_count;
> - uint32_t inflights; /* use __atomic builtins */
> + RTE_ATOMIC(uint32_t) inflights; /* use __atomic builtins */
> uint32_t new_event_limit;
> int max_num_events_override;
> int num_dir_credits_override;
> @@ -624,14 +624,14 @@ struct dlb2_eventdev {
> uint16_t max_ldb_credits;
> uint16_t max_dir_credits;
> /* use __atomic builtins */ /* shared hw cred */
Delete the first of the above two comments.
> - uint32_t ldb_credit_pool __rte_cache_aligned;
> + RTE_ATOMIC(uint32_t) ldb_credit_pool __rte_cache_aligned;
> /* use __atomic builtins */ /* shared hw cred */
Same here.
> - uint32_t dir_credit_pool __rte_cache_aligned;
> + RTE_ATOMIC(uint32_t) dir_credit_pool __rte_cache_aligned;
> };
> struct {
> uint16_t max_credits;
> /* use __atomic builtins */ /* shared hw cred */
> - uint32_t credit_pool __rte_cache_aligned;
> + RTE_ATOMIC(uint32_t) credit_pool __rte_cache_aligned;
> };
> };
> uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */
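For illustration only, a sketch of how those two declarations might read with the stale "use __atomic builtins" notes dropped, keeping the shared-hw-cred comment that still applies:

    /* shared hw cred */
    RTE_ATOMIC(uint32_t) ldb_credit_pool __rte_cache_aligned;
    /* shared hw cred */
    RTE_ATOMIC(uint32_t) dir_credit_pool __rte_cache_aligned;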
> diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
> index ff15271..22094f3 100644
> --- a/drivers/event/dlb2/dlb2_xstats.c
> +++ b/drivers/event/dlb2/dlb2_xstats.c
> @@ -173,7 +173,7 @@ struct dlb2_xstats_entry {
> case nb_events_limit:
> return dlb2->new_event_limit;
> case inflight_events:
> - return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
> + return rte_atomic_load_explicit(&dlb2->inflights, rte_memory_order_seq_cst);
This is more a question for the driver maintainer, but why does this load
need to be CST? What stores does it need to be ordered against? Even
load-acquire seems overkill to me, but I may well be missing something.
> case ldb_pool_size:
> return dlb2->num_ldb_credits;
> case dir_pool_size:
^ permalink raw reply [flat|nested] 300+ messages in thread
* RE: [PATCH v2 34/45] event/dlb2: use rte stdatomic API
2024-03-21 21:03 ` Mattias Rönnblom
@ 2024-04-09 19:31 ` Sevincer, Abdullah
0 siblings, 0 replies; 300+ messages in thread
From: Sevincer, Abdullah @ 2024-04-09 19:31 UTC (permalink / raw)
To: Mattias Rönnblom, Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Ajit Khaparde,
Alok Prasad, Burakov, Anatoly, Andrew Rybchenko, Anoob Joseph,
Richardson, Bruce, Marohn, Byron, Chenbo Xia, Chengwen Feng,
Loftus, Ciara, Power, Ciara, Dariusz Sosnowski, Hunt, David,
Devendra Singh Rawat, Carrillo, Erik G, Guoyang Zhou,
Harman Kalra, Van Haaren, Harry, Honnappa Nagarahalli,
Jakub Grajciar, Jerin Jacob, Jeroen de Borst, Jian Wang,
Jiawen Wu, Jie Hai, Wu, Jingjing, Joshua Washington, Joyce Kong,
Guo, Junfeng, Laatz, Kevin, Konstantin Ananyev, Liang Ma,
Long Li, Maciej Czekaj, Matan Azrad, Maxime Coquelin, Chautru,
Nicolas, Ori Kam, Pavan Nikhilesh, Mccarthy, Peter,
Rahul Lakkireddy, Pattan, Reshma, Xu, Rosen, Ruifeng Wang,
Rushil Gupta, Gobriel, Sameh, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Medvedkin, Vladimir, Xiaoyun Wang, Wang,
Yipeng1, Yisen Zhuang, Zhang, Yuying, Ziyang Xuan
>+uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
Spell check complains here; you can add a space after the '*' if you don’t want that complaint (e.g. uint32_t * credit_pool).
> case nb_events_limit:
> return dlb2->new_event_limit;
> case inflight_events:
> - return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
> + return rte_atomic_load_explicit(&dlb2->inflights,
> +rte_memory_order_seq_cst);
>+This is more a question for the driver maintainer, but why does this load need to be CST? What stores does it need to be ordered against? Even load-acquire seems overkill to me, but I may well be missing something.
I am not sure why the previous maintainers went this way; I am looking into it. Right now it looks to me like the strict ordering requirement can be relaxed. If so, I will submit a patch later to address this.
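If the requirement can indeed be relaxed, a minimal sketch of what the xstats read might become; this assumes the counter only needs a best-effort snapshot with no ordering against other stores, which is for the maintainer to confirm:

    case inflight_events:
        /* relaxed load: best-effort snapshot of the counter, no
         * ordering against other memory operations is implied */
        return rte_atomic_load_explicit(&dlb2->inflights,
                rte_memory_order_relaxed);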
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v2 35/45] dma/idxd: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (33 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 34/45] event/dlb2: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 36/45] crypto/ccp: " Tyler Retzlaff
` (9 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index cd41777..537cf9b 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -33,7 +33,7 @@ struct idxd_pci_common {
rte_spinlock_t lk;
uint8_t wq_cfg_sz;
- uint16_t ref_count;
+ RTE_ATOMIC(uint16_t) ref_count;
volatile struct rte_idxd_bar0 *regs;
volatile uint32_t *wq_regs_base;
volatile struct rte_idxd_grpcfg *grp_regs;
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index a78889a..06fa115 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -136,7 +136,8 @@
* the PCI struct
*/
/* NOTE: review for potential ordering optimization */
- is_last_wq = (__atomic_fetch_sub(&idxd->u.pci->ref_count, 1, __ATOMIC_SEQ_CST) == 1);
+ is_last_wq = (rte_atomic_fetch_sub_explicit(&idxd->u.pci->ref_count, 1,
+ rte_memory_order_seq_cst) == 1);
if (is_last_wq) {
/* disable the device */
err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -330,9 +331,9 @@
return ret;
}
qid = rte_dma_get_dev_id_by_name(qname);
- max_qid = __atomic_load_n(
+ max_qid = rte_atomic_load_explicit(
&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
- __ATOMIC_SEQ_CST);
+ rte_memory_order_seq_cst);
/* we have queue 0 done, now configure the rest of the queues */
for (qid = 1; qid < max_qid; qid++) {
@@ -389,7 +390,7 @@
free(idxd.u.pci);
return ret;
}
- __atomic_fetch_add(&idxd.u.pci->ref_count, 1, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&idxd.u.pci->ref_count, 1, rte_memory_order_seq_cst);
}
return 0;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v2 36/45] crypto/ccp: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (34 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 35/45] dma/idxd: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 37/45] common/cpt: " Tyler Retzlaff
` (8 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/crypto/ccp/ccp_dev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index b7ca3af..41c1422 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -116,15 +116,15 @@ struct ccp_queue *
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ (1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_and_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ ~(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline uint32_t
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v2 37/45] common/cpt: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (35 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 36/45] crypto/ccp: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 38/45] bus/vmbus: " Tyler Retzlaff
` (7 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/common/cpt/cpt_common.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index d70668a..dc79e3a 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -73,7 +73,7 @@ struct cpt_request_info {
const unsigned int qsize)
{
/* Ensure ordering between setting the entry and updating the tail */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
q->tail = (q->tail + cnt) & (qsize - 1);
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v2 38/45] bus/vmbus: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (36 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 37/45] common/cpt: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 21:12 ` Mattias Rönnblom
2024-03-22 19:34 ` Long Li
2024-03-21 19:17 ` [PATCH v2 39/45] examples: " Tyler Retzlaff
` (6 subsequent siblings)
44 siblings, 2 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
drivers/bus/vmbus/vmbus_channel.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index 4d74df3..e96a4eb 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -19,22 +19,23 @@
#include "private.h"
static inline void
-vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+vmbus_sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
/* Use GCC builtin which atomic does atomic OR operation */
- __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}
static inline void
vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
{
- uint32_t *monitor_addr, monitor_mask;
+ RTE_ATOMIC(uint32_t) *monitor_addr, monitor_mask;
unsigned int trigger_index;
trigger_index = monitor_id / HV_MON_TRIG_LEN;
monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
- monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;
+ monitor_addr =
+ (uint32_t __rte_atomic *)&channel->monitor_page->trigs[trigger_index].pending;
vmbus_sync_set_bit(monitor_addr, monitor_mask);
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* Re: [PATCH v2 38/45] bus/vmbus: use rte stdatomic API
2024-03-21 19:17 ` [PATCH v2 38/45] bus/vmbus: " Tyler Retzlaff
@ 2024-03-21 21:12 ` Mattias Rönnblom
2024-03-21 21:34 ` Long Li
2024-03-22 19:34 ` Long Li
1 sibling, 1 reply; 300+ messages in thread
From: Mattias Rönnblom @ 2024-03-21 21:12 UTC (permalink / raw)
To: Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Ziyang Xuan
On 2024-03-21 20:17, Tyler Retzlaff wrote:
> Replace the use of gcc builtin __atomic_xxx intrinsics with
> corresponding rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> ---
> drivers/bus/vmbus/vmbus_channel.c | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
> index 4d74df3..e96a4eb 100644
> --- a/drivers/bus/vmbus/vmbus_channel.c
> +++ b/drivers/bus/vmbus/vmbus_channel.c
> @@ -19,22 +19,23 @@
> #include "private.h"
>
> static inline void
> -vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
> +vmbus_sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
> {
> /* Use GCC builtin which atomic does atomic OR operation */
Remove/rephrase this comment.
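One possible rewording, sketched here, since the call no longer goes through the GCC builtin directly:

    /* Atomically OR the mask into the trigger word. */
    rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);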
> - __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);
> + rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
> }
>
> static inline void
> vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
> {
> - uint32_t *monitor_addr, monitor_mask;
> + RTE_ATOMIC(uint32_t) *monitor_addr, monitor_mask;
> unsigned int trigger_index;
>
> trigger_index = monitor_id / HV_MON_TRIG_LEN;
> monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
>
> - monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;
> + monitor_addr =
> + (uint32_t __rte_atomic *)&channel->monitor_page->trigs[trigger_index].pending;
Why is "pending" not RTE_ATOMIC()?
> vmbus_sync_set_bit(monitor_addr, monitor_mask);
> }
>
^ permalink raw reply [flat|nested] 300+ messages in thread
* RE: [PATCH v2 38/45] bus/vmbus: use rte stdatomic API
2024-03-21 21:12 ` Mattias Rönnblom
@ 2024-03-21 21:34 ` Long Li
2024-03-22 7:04 ` Mattias Rönnblom
0 siblings, 1 reply; 300+ messages in thread
From: Long Li @ 2024-03-21 21:34 UTC (permalink / raw)
To: Mattias Rönnblom, Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Maciej Czekaj, Matan Azrad,
Maxime Coquelin, Nicolas Chautru, Ori Kam, Pavan Nikhilesh,
Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan, Rosen Xu,
Ruifeng Wang, Rushil Gupta, Sameh Gobriel, Sivaprasad Tummala,
Somnath Kotur, stephen, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru, Slava Ovsiienko,
Vladimir Medvedkin, Xiaoyun Wang, Yipeng Wang, Yisen Zhuang,
Yuying Zhang, Ziyang Xuan
> > static inline void
> > vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t
> monitor_id)
> > {
> > - uint32_t *monitor_addr, monitor_mask;
> > + RTE_ATOMIC(uint32_t) *monitor_addr, monitor_mask;
> > unsigned int trigger_index;
> >
> > trigger_index = monitor_id / HV_MON_TRIG_LEN;
> > monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
> >
> > - monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;
> > + monitor_addr =
> > + (uint32_t __rte_atomic
> > +*)&channel->monitor_page->trigs[trigger_index].pending;
>
> Why is "pending" not RTE_ATOMIC()?
The usage is okay. The value is used to notify the VSP (Hyper-V). It's always set (no read) from DPDK.
Linux kernel driver does the same thing.
Long
^ permalink raw reply [flat|nested] 300+ messages in thread
* Re: [PATCH v2 38/45] bus/vmbus: use rte stdatomic API
2024-03-21 21:34 ` Long Li
@ 2024-03-22 7:04 ` Mattias Rönnblom
2024-03-22 19:32 ` Long Li
0 siblings, 1 reply; 300+ messages in thread
From: Mattias Rönnblom @ 2024-03-22 7:04 UTC (permalink / raw)
To: Long Li, Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Maciej Czekaj, Matan Azrad,
Maxime Coquelin, Nicolas Chautru, Ori Kam, Pavan Nikhilesh,
Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan, Rosen Xu,
Ruifeng Wang, Rushil Gupta, Sameh Gobriel, Sivaprasad Tummala,
Somnath Kotur, stephen, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru, Slava Ovsiienko,
Vladimir Medvedkin, Xiaoyun Wang, Yipeng Wang, Yisen Zhuang,
Yuying Zhang, Ziyang Xuan
On 2024-03-21 22:34, Long Li wrote:
>
>>> static inline void
>>> vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t
>> monitor_id)
>>> {
>>> - uint32_t *monitor_addr, monitor_mask;
>>> + RTE_ATOMIC(uint32_t) *monitor_addr, monitor_mask;
>>> unsigned int trigger_index;
>>>
>>> trigger_index = monitor_id / HV_MON_TRIG_LEN;
>>> monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
>>>
>>> - monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;
>>> + monitor_addr =
>>> + (uint32_t __rte_atomic
>>> +*)&channel->monitor_page->trigs[trigger_index].pending;
>>
>> Why is "pending" not RTE_ATOMIC()?
>
> The usage is okay. The value is used to notify the VSP (Hyper-V). It's always set (no read) from DPDK.
>
OK, so my question was not "does it need to be atomic", but rather "why
isn't it marked RTE_ATOMIC() when it's treated as atomic".
But what you are saying is that it need not be atomic? Just the
equivalent of WRITE_ONCE()? Or a relaxed atomic store?
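For reference, a sketch of the relaxed variant being asked about; it assumes the hypervisor notification needs no ordering beyond the atomic bit set itself, which is exactly the open question:

    /* still an atomic RMW (other bits in the same trigger word may be
     * set concurrently), but without the seq_cst ordering */
    rte_atomic_fetch_or_explicit(monitor_addr, monitor_mask,
            rte_memory_order_relaxed);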
> Linux kernel driver does the same thing.
>
> Long
^ permalink raw reply [flat|nested] 300+ messages in thread
* RE: [PATCH v2 38/45] bus/vmbus: use rte stdatomic API
2024-03-22 7:04 ` Mattias Rönnblom
@ 2024-03-22 19:32 ` Long Li
0 siblings, 0 replies; 300+ messages in thread
From: Long Li @ 2024-03-22 19:32 UTC (permalink / raw)
To: Mattias Rönnblom, Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Maciej Czekaj, Matan Azrad,
Maxime Coquelin, Nicolas Chautru, Ori Kam, Pavan Nikhilesh,
Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan, Rosen Xu,
Ruifeng Wang, Rushil Gupta, Sameh Gobriel, Sivaprasad Tummala,
Somnath Kotur, stephen, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru, Slava Ovsiienko,
Vladimir Medvedkin, Xiaoyun Wang, Yipeng Wang, Yisen Zhuang,
Yuying Zhang, Ziyang Xuan
> > The usage is okay. The value is used to notify the VSP (Hyper-V). It's always set
> (no read) from DPDK.
> >
>
> OK, so my question was not "does it need to be atomic", but rather "why isn't it
> marked RTE_ATOMIC() when it's treated as atomic".
>
> But what you are saying is that it need not be atomic? Just the equivalent of
> WRITE_ONCE()? Or a relaxed atomic store?
Sorry, I misunderstood your question. Yes, it would be a good idea to mark "pending" as RTE_ATOMIC.
This value needs to be atomic. However, the existing code is still correct, in that the update is already done atomically.
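A minimal sketch of that follow-up, assuming the field is annotated where the monitor trigger page is defined; the cast in vmbus_set_monitor() could then be dropped:

    /* hypothetical: annotate the field in the trigger page definition */
    RTE_ATOMIC(uint32_t) pending;

    /* vmbus_set_monitor() then needs no cast */
    monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;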
^ permalink raw reply [flat|nested] 300+ messages in thread
* RE: [PATCH v2 38/45] bus/vmbus: use rte stdatomic API
2024-03-21 19:17 ` [PATCH v2 38/45] bus/vmbus: " Tyler Retzlaff
2024-03-21 21:12 ` Mattias Rönnblom
@ 2024-03-22 19:34 ` Long Li
2024-03-25 16:41 ` Tyler Retzlaff
1 sibling, 1 reply; 300+ messages in thread
From: Long Li @ 2024-03-22 19:34 UTC (permalink / raw)
To: Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Maciej Czekaj, Matan Azrad,
Maxime Coquelin, Nicolas Chautru, Ori Kam, Pavan Nikhilesh,
Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan, Rosen Xu,
Ruifeng Wang, Rushil Gupta, Sameh Gobriel, Sivaprasad Tummala,
Somnath Kotur, stephen, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru, Slava Ovsiienko,
Vladimir Medvedkin, Xiaoyun Wang, Yipeng Wang, Yisen Zhuang,
Yuying Zhang, Yuying Zhang, Ziyang Xuan
> static inline void
> vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
> {
> - uint32_t *monitor_addr, monitor_mask;
> + RTE_ATOMIC(uint32_t) *monitor_addr, monitor_mask;
Does this mean monitor_mask will also change to RTE_ATOMIC(uint32_t)?
Seems not necessary.
> unsigned int trigger_index;
>
> trigger_index = monitor_id / HV_MON_TRIG_LEN;
> monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
>
> - monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;
> + monitor_addr =
> + (uint32_t __rte_atomic
> +*)&channel->monitor_page->trigs[trigger_index].pending;
> vmbus_sync_set_bit(monitor_addr, monitor_mask); }
>
> --
> 1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* Re: [PATCH v2 38/45] bus/vmbus: use rte stdatomic API
2024-03-22 19:34 ` Long Li
@ 2024-03-25 16:41 ` Tyler Retzlaff
0 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-25 16:41 UTC (permalink / raw)
To: Long Li
Cc: dev, Mattias Rönnblom, Morten Brørup,
Abdullah Sevincer, Ajit Khaparde, Alok Prasad, Anatoly Burakov,
Andrew Rybchenko, Anoob Joseph, Bruce Richardson, Byron Marohn,
Chenbo Xia, Chengwen Feng, Ciara Loftus, Ciara Power,
Dariusz Sosnowski, David Hunt, Devendra Singh Rawat,
Erik Gabriel Carrillo, Guoyang Zhou, Harman Kalra,
Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
Kevin Laatz, Konstantin Ananyev, Liang Ma, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, stephen, Suanming Mou,
Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Slava Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang, Yipeng Wang,
Yisen Zhuang, Yuying Zhang, Ziyang Xuan
On Fri, Mar 22, 2024 at 07:34:46PM +0000, Long Li wrote:
> > static inline void
> > vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
> > {
> > - uint32_t *monitor_addr, monitor_mask;
> > + RTE_ATOMIC(uint32_t) *monitor_addr, monitor_mask;
>
> Does this mean monitor_mask will also change to RTE_ATOMIC(uint32_t)?
>
> Seems not necessary.
Looks like a mistake; I will review it and make it clear in the next revision.
Thanks for spotting it.
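For what it's worth, a sketch of how the declarations might be split in the next revision, so only the pointer is atomic-qualified while the mask stays a plain value (an assumption about the follow-up, not the actual v3):

    RTE_ATOMIC(uint32_t) *monitor_addr;
    uint32_t monitor_mask;
    unsigned int trigger_index;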
>
> > unsigned int trigger_index;
> >
> > trigger_index = monitor_id / HV_MON_TRIG_LEN;
> > monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
> >
> > - monitor_addr = &channel->monitor_page->trigs[trigger_index].pending;
> > + monitor_addr =
> > + (uint32_t __rte_atomic
> > +*)&channel->monitor_page->trigs[trigger_index].pending;
> > vmbus_sync_set_bit(monitor_addr, monitor_mask); }
> >
> > --
> > 1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v2 39/45] examples: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (37 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 38/45] bus/vmbus: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 40/45] app/dumpcap: " Tyler Retzlaff
` (5 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
examples/bbdev_app/main.c | 13 +++++----
examples/l2fwd-event/l2fwd_common.h | 4 +--
examples/l2fwd-event/l2fwd_event.c | 24 ++++++++--------
examples/l2fwd-jobstats/main.c | 11 ++++----
.../client_server_mp/mp_server/main.c | 6 ++--
examples/server_node_efd/efd_server/main.c | 6 ++--
examples/vhost/main.c | 32 +++++++++++-----------
examples/vhost/main.h | 4 +--
examples/vhost/virtio_net.c | 13 +++++----
examples/vhost_blk/vhost_blk.c | 8 +++---
examples/vm_power_manager/channel_monitor.c | 9 +++---
11 files changed, 68 insertions(+), 62 deletions(-)
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 16599ae..214fdf2 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -165,7 +165,7 @@ struct stats_lcore_params {
.num_dec_cores = 1,
};
-static uint16_t global_exit_flag;
+static RTE_ATOMIC(uint16_t) global_exit_flag;
/* display usage */
static inline void
@@ -277,7 +277,7 @@ uint16_t bbdev_parse_number(const char *mask)
signal_handler(int signum)
{
printf("\nSignal %d received\n", signum);
- __atomic_store_n(&global_exit_flag, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 1, rte_memory_order_relaxed);
}
static void
@@ -321,7 +321,8 @@ uint16_t bbdev_parse_number(const char *mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME &&
- !__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED); count++) {
+ !rte_atomic_load_explicit(&global_exit_flag,
+ rte_memory_order_relaxed); count++) {
memset(&link, 0, sizeof(link));
link_get_err = rte_eth_link_get_nowait(port_id, &link);
@@ -675,7 +676,7 @@ uint16_t bbdev_parse_number(const char *mask)
{
struct stats_lcore_params *stats_lcore = arg;
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
print_stats(stats_lcore);
rte_delay_ms(500);
}
@@ -921,7 +922,7 @@ uint16_t bbdev_parse_number(const char *mask)
const bool run_decoder = (lcore_conf->core_type &
(1 << RTE_BBDEV_OP_TURBO_DEC));
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
if (run_encoder)
run_encoding(lcore_conf);
if (run_decoder)
@@ -1055,7 +1056,7 @@ uint16_t bbdev_parse_number(const char *mask)
.align = alignof(struct rte_mbuf *),
};
- __atomic_store_n(&global_exit_flag, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 0, rte_memory_order_relaxed);
sigret = signal(SIGTERM, signal_handler);
if (sigret == SIG_ERR)
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index 07f84cb..3d2e303 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -61,8 +61,8 @@
/* Per-port statistics struct */
struct l2fwd_port_statistics {
uint64_t dropped;
- uint64_t tx;
- uint64_t rx;
+ RTE_ATOMIC(uint64_t) tx;
+ RTE_ATOMIC(uint64_t) rx;
} __rte_cache_aligned;
/* Event vector attributes */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 4b5a032..2247202 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -163,8 +163,8 @@
dst_port = rsrc->dst_ports[mbuf->port];
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].rx,
+ 1, rte_memory_order_relaxed);
mbuf->port = dst_port;
if (flags & L2FWD_EVENT_UPDT_MAC)
@@ -179,8 +179,8 @@
rte_event_eth_tx_adapter_txq_set(mbuf, 0);
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].tx,
+ 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -367,8 +367,8 @@
vec->queue = 0;
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbufs[0]->port].rx,
+ vec->nb_elem, rte_memory_order_relaxed);
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (j < vec->nb_elem)
@@ -382,14 +382,14 @@
}
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[vec->port].tx,
+ vec->nb_elem, rte_memory_order_relaxed);
} else {
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].rx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (j < vec->nb_elem)
rte_prefetch0(
@@ -406,9 +406,9 @@
rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].tx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
}
}
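The l2fwd-event hunks all follow a single counter pattern: the statistics fields become RTE_ATOMIC(uint64_t) and every increment goes through rte_atomic_fetch_add_explicit() with relaxed ordering. Roughly, and with illustrative names only:

#include <stdint.h>
#include <rte_stdatomic.h>

struct port_stats {
	RTE_ATOMIC(uint64_t) rx;
	RTE_ATOMIC(uint64_t) tx;
};

static struct port_stats stats;

static inline void
count_rx(uint64_t pkts)
{
	/* relaxed: the counter is only read later for reporting */
	rte_atomic_fetch_add_explicit(&stats.rx, pkts, rte_memory_order_relaxed);
}

static inline uint64_t
read_rx(void)
{
	return rte_atomic_load_explicit(&stats.rx, rte_memory_order_relaxed);
}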
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 2653db4..9a094ef 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -80,7 +80,7 @@ struct lcore_queue_conf {
struct rte_jobstats idle_job;
struct rte_jobstats_context jobs_context;
- uint16_t stats_read_pending;
+ RTE_ATOMIC(uint16_t) stats_read_pending;
rte_spinlock_t lock;
} __rte_cache_aligned;
/* >8 End of list of queues to be polled for given lcore. */
@@ -151,9 +151,9 @@ struct l2fwd_port_statistics {
uint64_t collection_time = rte_get_timer_cycles();
/* Ask forwarding thread to give us stats. */
- __atomic_store_n(&qconf->stats_read_pending, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&qconf->lock);
- __atomic_store_n(&qconf->stats_read_pending, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);
/* Collect context statistics. */
stats_period = ctx->state_time - ctx->start_time;
@@ -522,8 +522,9 @@ struct l2fwd_port_statistics {
repeats++;
need_manage = qconf->flush_timer.expire < now;
/* Check if we was esked to give a stats. */
- stats_read_pending = __atomic_load_n(&qconf->stats_read_pending,
- __ATOMIC_RELAXED);
+ stats_read_pending = rte_atomic_load_explicit(
+ &qconf->stats_read_pending,
+ rte_memory_order_relaxed);
need_manage |= stats_read_pending;
for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index f54bb8b..ebfc2fe 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -157,12 +157,12 @@ struct client_rx_buf {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
uint32_t status = 0;
/* Only one core should display stats */
- if (__atomic_compare_exchange_n(&display_stats, &status, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &status, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/server_node_efd/efd_server/main.c b/examples/server_node_efd/efd_server/main.c
index fd72882..75ff0ea 100644
--- a/examples/server_node_efd/efd_server/main.c
+++ b/examples/server_node_efd/efd_server/main.c
@@ -177,12 +177,12 @@ struct efd_stats {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
/* Only one core should display stats */
uint32_t display_init = 0;
- if (__atomic_compare_exchange_n(&display_stats, &display_init, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &display_init, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned int sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3fc1b15..4391d88 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1052,10 +1052,10 @@ static unsigned check_ports_num(unsigned nb_ports)
}
if (enable_stats) {
- __atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_total_atomic, 1,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
src_vdev->stats.tx_total++;
src_vdev->stats.tx += ret;
}
@@ -1072,10 +1072,10 @@ static unsigned check_ports_num(unsigned nb_ports)
ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, nr_xmit,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1404,10 +1404,10 @@ static void virtio_tx_offload(struct rte_mbuf *m)
}
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, rx_count,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, enqueue_count,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1832,10 +1832,10 @@ uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
- __ATOMIC_SEQ_CST);
- rx = __atomic_load_n(&vdev->stats.rx_atomic,
- __ATOMIC_SEQ_CST);
+ rx_total = rte_atomic_load_explicit(&vdev->stats.rx_total_atomic,
+ rte_memory_order_seq_cst);
+ rx = rte_atomic_load_explicit(&vdev->stats.rx_atomic,
+ rte_memory_order_seq_cst);
rx_dropped = rx_total - rx;
printf("Statistics for device %d\n"
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 2fcb837..b163955 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -22,8 +22,8 @@
struct device_statistics {
uint64_t tx;
uint64_t tx_total;
- uint64_t rx_atomic;
- uint64_t rx_total_atomic;
+ RTE_ATOMIC(uint64_t) rx_atomic;
+ RTE_ATOMIC(uint64_t) rx_total_atomic;
};
struct vhost_queue {
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 514c8e0..55af6e7 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -198,7 +198,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
+ avail_idx = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire);
start_idx = queue->last_used_idx;
free_entries = avail_idx - start_idx;
count = RTE_MIN(count, free_entries);
@@ -231,7 +232,8 @@
rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
}
- __atomic_fetch_add(&vr->used->idx, count, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, count,
+ rte_memory_order_release);
queue->last_used_idx += count;
rte_vhost_vring_call(dev->vid, queue_id);
@@ -386,8 +388,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
- queue->last_avail_idx;
+ free_entries = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire) - queue->last_avail_idx;
if (free_entries == 0)
return 0;
@@ -442,7 +444,8 @@
queue->last_avail_idx += i;
queue->last_used_idx += i;
- __atomic_fetch_add(&vr->used->idx, i, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, i,
+ rte_memory_order_acq_rel);
rte_vhost_vring_call(dev->vid, queue_id);
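virtio_net.c shows the other style used when a field's declared type cannot change (the vring index fields are presumably fixed by the virtio ring layout): the access site casts to a __rte_atomic-qualified pointer instead of annotating the declaration. A rough sketch of that cast, using a hypothetical ring structure:

#include <stdint.h>
#include <rte_stdatomic.h>

struct ring_hdr {
	uint16_t idx; /* declared type stays plain: the layout is shared */
};

static inline uint16_t
ring_load_idx(struct ring_hdr *r)
{
	/* qualify the plain field at the point of use */
	return rte_atomic_load_explicit((uint16_t __rte_atomic *)&r->idx,
			rte_memory_order_acquire);
}

static inline void
ring_publish(struct ring_hdr *r, uint16_t n)
{
	rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&r->idx, n,
			rte_memory_order_release);
}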
diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
index 376f7b8..03f1ac9 100644
--- a/examples/vhost_blk/vhost_blk.c
+++ b/examples/vhost_blk/vhost_blk.c
@@ -85,9 +85,9 @@ struct vhost_blk_ctrlr *
*/
used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
used->idx++;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
vq->id, used->idx, task->req_idx);
@@ -111,12 +111,12 @@ struct vhost_blk_ctrlr *
desc->id = task->buffer_id;
desc->addr = 0;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
if (vq->used_wrap_counter)
desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
else
desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
task->inflight_idx);
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5fef268..d384c86 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -828,8 +828,9 @@ void channel_monitor_exit(void)
return -1;
uint32_t channel_connected = CHANNEL_MGR_CHANNEL_CONNECTED;
- if (__atomic_compare_exchange_n(&(chan_info->status), &channel_connected,
- CHANNEL_MGR_CHANNEL_PROCESSING, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_connected,
+ CHANNEL_MGR_CHANNEL_PROCESSING, rte_memory_order_relaxed,
+ rte_memory_order_relaxed) == 0)
return -1;
if (pkt->command == RTE_POWER_CPU_POWER) {
@@ -934,8 +935,8 @@ void channel_monitor_exit(void)
* from management thread
*/
uint32_t channel_processing = CHANNEL_MGR_CHANNEL_PROCESSING;
- __atomic_compare_exchange_n(&(chan_info->status), &channel_processing,
- CHANNEL_MGR_CHANNEL_CONNECTED, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_processing,
+ CHANNEL_MGR_CHANNEL_CONNECTED, rte_memory_order_relaxed, rte_memory_order_relaxed);
return 0;
}
--
1.8.3.1
* [PATCH v2 40/45] app/dumpcap: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (38 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 39/45] examples: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 41/45] app/test: " Tyler Retzlaff
` (4 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/dumpcap/main.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/app/dumpcap/main.c b/app/dumpcap/main.c
index cc0f66b..b25b95e 100644
--- a/app/dumpcap/main.c
+++ b/app/dumpcap/main.c
@@ -51,7 +51,7 @@
/* command line flags */
static const char *progname;
-static bool quit_signal;
+static RTE_ATOMIC(bool) quit_signal;
static bool group_read;
static bool quiet;
static bool use_pcapng = true;
@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv)
static void
signal_handler(int sig_num __rte_unused)
{
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
@@ -490,7 +490,7 @@ static void statistics_loop(void)
printf("%-15s %10s %10s\n",
"Interface", "Received", "Dropped");
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
RTE_ETH_FOREACH_DEV(p) {
if (rte_eth_dev_get_name_by_port(p, name) < 0)
continue;
@@ -528,7 +528,7 @@ static void statistics_loop(void)
static void
monitor_primary(void *arg __rte_unused)
{
- if (__atomic_load_n(&quit_signal, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed))
return;
if (rte_eal_primary_proc_alive(NULL)) {
@@ -536,7 +536,7 @@ static void statistics_loop(void)
} else {
fprintf(stderr,
"Primary process is no longer active, exiting...\n");
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
}
@@ -983,7 +983,7 @@ int main(int argc, char **argv)
show_count(0);
}
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
if (process_ring(out, r) < 0) {
fprintf(stderr, "pcapng file write failed; %s\n",
strerror(errno));
--
1.8.3.1
* [PATCH v2 41/45] app/test: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (39 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 40/45] app/dumpcap: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 42/45] app/test-eventdev: " Tyler Retzlaff
` (3 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/test/test_bpf.c | 46 ++++++++-----
app/test/test_distributor.c | 114 ++++++++++++++++-----------------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++++----
app/test/test_hash_multiwriter.c | 16 ++---
app/test/test_hash_readwrite.c | 74 ++++++++++-----------
app/test/test_hash_readwrite_lf_perf.c | 88 ++++++++++++-------------
app/test/test_lcores.c | 25 ++++----
app/test/test_lpm_perf.c | 14 ++--
app/test/test_mcslock.c | 12 ++--
app/test/test_mempool_perf.c | 9 +--
app/test/test_pflock.c | 13 ++--
app/test/test_pmd_perf.c | 10 +--
app/test/test_rcu_qsbr_perf.c | 114 +++++++++++++++++----------------
app/test/test_ring_perf.c | 11 ++--
app/test/test_ring_stress_impl.h | 10 +--
app/test/test_rwlock.c | 9 +--
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +++----
app/test/test_spinlock.c | 9 +--
app/test/test_stack_perf.c | 12 ++--
app/test/test_threads.c | 33 +++++-----
app/test/test_ticketlock.c | 9 +--
app/test/test_timer.c | 31 +++++----
24 files changed, 378 insertions(+), 343 deletions(-)
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 53e3a31..2e43442 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -39,8 +39,8 @@
*/
struct dummy_offset {
- uint64_t u64;
- uint32_t u32;
+ RTE_ATOMIC(uint64_t) u64;
+ RTE_ATOMIC(uint32_t) u32;
uint16_t u16;
uint8_t u8;
};
@@ -1581,32 +1581,46 @@ struct bpf_test {
memset(&dfe, 0, sizeof(dfe));
rv = 1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = -1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = (int32_t)TEST_FILL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_3;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}
diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index d2037b7..df871e3 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -47,14 +47,14 @@ struct worker_params {
struct worker_params worker_params;
/* statics - all zero-initialized by default */
-static volatile int quit; /**< general quit variable for all threads */
-static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
-static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
-static volatile unsigned worker_idx;
-static volatile unsigned zero_idx;
+static volatile RTE_ATOMIC(int) quit; /**< general quit variable for all threads */
+static volatile RTE_ATOMIC(int) zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile RTE_ATOMIC(int) zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
+static volatile RTE_ATOMIC(unsigned int) zero_idx;
struct worker_stats {
- volatile unsigned handled_packets;
+ volatile RTE_ATOMIC(unsigned int) handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];
@@ -66,8 +66,8 @@ struct worker_stats {
{
unsigned i, count = 0;
for (i = 0; i < worker_idx; i++)
- count += __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED);
+ count += rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed);
return count;
}
@@ -77,8 +77,8 @@ struct worker_stats {
{
unsigned int i;
for (i = 0; i < RTE_MAX_LCORE; i++)
- __atomic_store_n(&worker_stats[i].handled_packets, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&worker_stats[i].handled_packets, 0,
+ rte_memory_order_relaxed);
}
/* this is the basic worker function for sanity test
@@ -91,17 +91,17 @@ struct worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -162,8 +162,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with all zero hashes done.\n");
/* pick two flows and check they go correctly */
@@ -189,9 +189,9 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(
+ rte_atomic_load_explicit(
&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_memory_order_relaxed));
printf("Sanity test with two hash values done\n");
}
@@ -218,8 +218,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with non-zero hashes done\n");
rte_mempool_put_bulk(p, (void *)bufs, BURST);
@@ -311,18 +311,18 @@ struct worker_stats {
struct rte_distributor *d = wp->dist;
unsigned int i;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
rte_pktmbuf_free(buf[i]);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(d, id, buf, num);
return 0;
}
@@ -381,51 +381,51 @@ struct worker_stats {
unsigned int num;
unsigned int zero_id = 0;
unsigned int zero_unset;
- const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
- __ATOMIC_RELAXED);
+ const unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
/* wait for quit single globally, or for worker zero, wait
* for zero_quit */
while (!quit && !(id == zero_id && zero_quit)) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
if (id == zero_id) {
rte_distributor_return_pkt(d, id, NULL, 0);
/* for worker zero, allow it to restart to pick up last packet
* when all workers are shutting down.
*/
- __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 1, rte_memory_order_release);
while (zero_quit)
usleep(100);
- __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 0, rte_memory_order_release);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets,
- num, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets,
+ num, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
}
@@ -491,17 +491,17 @@ struct worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST * 2) {
printf("Line %d: Error, not all packets flushed. "
@@ -560,18 +560,18 @@ struct worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
@@ -596,18 +596,18 @@ struct worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num, i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
*seq_field(buf[i]) += id + 1;
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -679,8 +679,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
/* Sort returned packets by sent order (sequence numbers). */
for (i = 0; i < buf_count; i++) {
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index ca86845..ba3cf26 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -31,7 +31,7 @@
/* static vars - zero initialized by default */
static volatile int quit;
-static volatile unsigned worker_idx;
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
struct worker_stats {
volatile unsigned handled_packets;
@@ -121,7 +121,7 @@ struct worker_stats {
struct rte_distributor *d = arg;
unsigned int num = 0;
int i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
struct rte_mbuf *buf[8] __rte_cache_aligned;
for (i = 0; i < 8; i++)
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 9296de2..bae39af 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -53,12 +53,13 @@
#define MAX_LCORES (rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))
-static uint32_t obj_count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) obj_count;
+static RTE_ATOMIC(uint32_t) synchro;
#define WAIT_SYNCHRO_FOR_WORKERS() do { \
if (lcore_self != rte_get_main_lcore()) \
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, \
+ rte_memory_order_relaxed); \
} while(0)
/*
@@ -71,7 +72,8 @@
WAIT_SYNCHRO_FOR_WORKERS();
- __atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silent the check in the caller */
+ /* silent the check in the caller */
+ rte_atomic_store_explicit(&obj_count, 1, rte_memory_order_relaxed);
if (rte_eal_init(0, NULL) != -1)
return -1;
@@ -113,7 +115,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
if (rp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -178,7 +180,7 @@
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -244,7 +246,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_hash_create(&hash_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple times simultaneously */
@@ -311,7 +313,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_fbk_hash_create(&fbk_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -376,7 +378,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
if (lpm != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -437,8 +439,8 @@ struct test_case test_cases[] = {
if (pt_case->func == NULL)
return -1;
- __atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&obj_count, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -448,7 +450,7 @@ struct test_case test_cases[] = {
rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
}
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
@@ -463,7 +465,7 @@ struct test_case test_cases[] = {
pt_case->clean(lcore_id);
}
- count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
+ count = rte_atomic_load_explicit(&obj_count, rte_memory_order_relaxed);
if (count != 1) {
printf("%s: common object allocated %d times (should be 1)\n",
pt_case->name, count);
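The WAIT_SYNCHRO_FOR_WORKERS() change above also illustrates how the tests pair an RTE_ATOMIC(uint32_t) variable with rte_wait_until_equal_32(), which still expects a plain (non-atomic-qualified) uint32_t pointer, hence the (uint32_t *)(uintptr_t) cast. A minimal sketch of that start-barrier idiom, mirroring the cast used in the patch (names are illustrative):

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) go;

static int
worker_fn(void *arg)
{
	(void)arg;
	/* workers spin until the main lcore releases them */
	rte_wait_until_equal_32((uint32_t *)(uintptr_t)&go, 1,
			rte_memory_order_relaxed);
	/* ... measured loop would run here ... */
	return 0;
}

static void
release_workers(void)
{
	rte_atomic_store_explicit(&go, 1, rte_memory_order_relaxed);
}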
diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
index ed9dd41..33d3147 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/app/test/test_hash_multiwriter.c
@@ -43,8 +43,8 @@ struct {
const uint32_t nb_total_tsx_insertion = 4.5*1024*1024;
uint32_t rounded_nb_total_tsx_insertion;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
static int use_htm;
@@ -84,8 +84,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)
tbl_multiwriter_test_params.keys[i]
@@ -166,8 +166,8 @@ struct {
tbl_multiwriter_test_params.found = found;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
/* Get list of enabled cores */
i = 0;
@@ -233,8 +233,8 @@ struct {
printf("No key corrupted during multiwriter insertion.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED)/
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed)/
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf(" cycles per insertion: %llu\n", cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 4997a01..1867376 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -45,14 +45,14 @@ struct {
struct rte_hash *h;
} tbl_rw_test_param;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
-static uint64_t gread_cycles;
-static uint64_t gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
-static uint64_t greads;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrites;
static int
test_hash_readwrite_worker(__rte_unused void *arg)
@@ -110,8 +110,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_rw_test_param.num_insert; i++)
tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
@@ -209,8 +209,8 @@ struct {
int worker_cnt = rte_lcore_count() - 1;
uint32_t tot_insert = 0;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
if (init_params(use_ext, use_htm, use_rw_lf, use_jhash) != 0)
goto err;
@@ -269,8 +269,8 @@ struct {
printf("No key corrupted during read-write test.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
@@ -310,8 +310,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, i, rte_memory_order_relaxed);
return 0;
}
@@ -344,9 +344,9 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rw_test_param.num_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rw_test_param.num_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -369,11 +369,11 @@ struct {
uint64_t start = 0, end = 0;
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
if (init_params(0, use_htm, 0, use_jhash) != 0)
goto err;
@@ -430,10 +430,10 @@ struct {
if (tot_worker_lcore < core_cnt[n] * 2)
goto finish;
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -475,8 +475,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_only[n] = cycles_per_insertion;
printf("Reader only: cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -484,17 +484,17 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->write_only[n] = cycles_per_insertion;
printf("Writer only: cycles per writes: %llu\n",
cycles_per_insertion);
}
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -569,8 +569,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_write_r[n] = cycles_per_insertion;
printf("Read-write cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -578,8 +578,8 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->read_write_w[n] = cycles_per_insertion;
printf("Read-write cycles per writes: %llu\n",
cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite_lf_perf.c b/app/test/test_hash_readwrite_lf_perf.c
index 5d18850..4523985 100644
--- a/app/test/test_hash_readwrite_lf_perf.c
+++ b/app/test/test_hash_readwrite_lf_perf.c
@@ -86,10 +86,10 @@ struct rwc_perf {
struct rte_hash *h;
} tbl_rwc_test_param;
-static uint64_t gread_cycles;
-static uint64_t greads;
-static uint64_t gwrite_cycles;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gwrites;
static volatile uint8_t writer_done;
@@ -651,8 +651,8 @@ struct rwc_perf {
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, read_cnt*loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, read_cnt*loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -724,8 +724,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -742,8 +742,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_hit[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -791,8 +791,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -811,8 +811,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_miss[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -861,8 +861,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -884,8 +884,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_nsp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -935,8 +935,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -958,8 +958,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_sp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1007,8 +1007,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1030,8 +1030,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
}
@@ -1087,9 +1087,9 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n",
rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0,
+ rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1127,10 +1127,10 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles,
- __ATOMIC_RELAXED) /
- __atomic_load_n(&greads,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles,
+ rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads,
+ rte_memory_order_relaxed);
rwc_perf_results->multi_rw[m][k][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n",
@@ -1178,8 +1178,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
write_type = WRITE_NO_KEY_SHIFT;
@@ -1210,8 +1210,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_extbkt[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1280,9 +1280,9 @@ struct rwc_perf {
tbl_rwc_test_param.keys_no_ks + i);
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rwc_test_param.single_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -1328,8 +1328,8 @@ struct rwc_perf {
rwc_core_cnt[n];
printf("\nNumber of writers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
rte_rcu_qsbr_init(rv, RTE_MAX_LCORE);
@@ -1364,8 +1364,8 @@ struct rwc_perf {
rte_eal_mp_wait_lcore();
unsigned long long cycles_per_write_operation =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
rwc_perf_results->writer_add_del[n]
= cycles_per_write_operation;
printf("Cycles per write operation: %llu\n",
diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 3434a0d..bd5c0dd 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -10,6 +10,7 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_thread.h>
+#include <rte_stdatomic.h>
#include "test.h"
@@ -25,7 +26,7 @@ struct thread_context {
enum { Thread_INIT, Thread_ERROR, Thread_DONE } state;
bool lcore_id_any;
rte_thread_t id;
- unsigned int *registered_count;
+ RTE_ATOMIC(unsigned int) *registered_count;
};
static uint32_t thread_loop(void *arg)
@@ -49,10 +50,10 @@ static uint32_t thread_loop(void *arg)
t->state = Thread_ERROR;
}
/* Report register happened to the control thread. */
- __atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(t->registered_count, 1, rte_memory_order_release);
/* Wait for release from the control thread. */
- while (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(t->registered_count, rte_memory_order_acquire) != 0)
sched_yield();
rte_thread_unregister();
lcore_id = rte_lcore_id();
@@ -73,7 +74,7 @@ static uint32_t thread_loop(void *arg)
{
struct thread_context thread_contexts[RTE_MAX_LCORE];
unsigned int non_eal_threads_count;
- unsigned int registered_count;
+ RTE_ATOMIC(unsigned int) registered_count;
struct thread_context *t;
unsigned int i;
int ret;
@@ -93,7 +94,7 @@ static uint32_t thread_loop(void *arg)
}
printf("non-EAL threads count: %u\n", non_eal_threads_count);
/* Wait all non-EAL threads to register. */
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
@@ -109,14 +110,14 @@ static uint32_t thread_loop(void *arg)
if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
non_eal_threads_count++;
printf("non-EAL threads count: %u\n", non_eal_threads_count);
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
}
skip_lcore_any:
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -225,7 +226,7 @@ struct limit_lcore_context {
struct thread_context thread_contexts[2];
unsigned int non_eal_threads_count = 0;
struct limit_lcore_context l[2] = {};
- unsigned int registered_count = 0;
+ RTE_ATOMIC(unsigned int) registered_count = 0;
struct thread_context *t;
void *handle[2] = {};
unsigned int i;
@@ -275,7 +276,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 1 ||
@@ -298,7 +299,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 2 ||
@@ -315,7 +316,7 @@ struct limit_lcore_context {
}
rte_lcore_dump(stdout);
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -337,7 +338,7 @@ struct limit_lcore_context {
cleanup_threads:
/* Release all threads */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
rte_thread_join(t->id, NULL);
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
index 82daf9e..bc4bdde 100644
--- a/app/test/test_lpm_perf.c
+++ b/app/test/test_lpm_perf.c
@@ -22,8 +22,8 @@
struct rte_lpm *lpm;
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
-static volatile uint32_t thr_id;
-static uint64_t gwrite_cycles;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
static uint32_t num_writers;
/* LPM APIs are not thread safe, use spinlock */
@@ -362,7 +362,7 @@ static void generate_large_route_rule_table(void)
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -470,7 +470,7 @@ static void generate_large_route_rule_table(void)
total_cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, total_cycles, rte_memory_order_relaxed);
return 0;
@@ -540,9 +540,9 @@ static void generate_large_route_rule_table(void)
reader_f = test_lpm_reader;
writer_done = 0;
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
/* Launch reader threads */
for (i = j; i < num_cores; i++)
@@ -563,7 +563,7 @@ static void generate_large_route_rule_table(void)
printf("Total LPM Adds: %d\n", TOTAL_WRITES);
printf("Total LPM Deletes: %d\n", TOTAL_WRITES);
printf("Average LPM Add/Del: %"PRIu64" cycles\n",
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed)
/ TOTAL_WRITES);
writer_done = 1;
diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index 46ff13c..8fcbc11 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -42,7 +42,7 @@
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_mcslock_per_core(__rte_unused void *arg)
@@ -75,7 +75,7 @@
rte_mcslock_t ml_perf_me;
/* wait synchro */
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -100,14 +100,14 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lock = 1;
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
@@ -116,11 +116,11 @@
printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index 96de347..35f0597 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -88,7 +88,7 @@
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
@@ -188,7 +188,8 @@ struct mempool_test_stats {
/* wait synchro for workers */
if (lcore_id != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
start_cycles = rte_get_timer_cycles();
@@ -233,7 +234,7 @@ struct mempool_test_stats {
int ret;
unsigned cores_save = cores;
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
/* reset stats */
memset(stats, 0, sizeof(stats));
@@ -258,7 +259,7 @@ struct mempool_test_stats {
}
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
ret = per_lcore_mempool_test(mp);
diff --git a/app/test/test_pflock.c b/app/test/test_pflock.c
index 5f77b15..d989a68 100644
--- a/app/test/test_pflock.c
+++ b/app/test/test_pflock.c
@@ -31,7 +31,7 @@
static rte_pflock_t sl;
static rte_pflock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_pflock_per_core(__rte_unused void *arg)
@@ -69,7 +69,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -99,7 +100,7 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -107,7 +108,7 @@
printf("\nTest with phase-fair lock on single core...\n");
lock = 1;
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -116,12 +117,12 @@
printf("\nPhase-fair test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index f6d97f2..46ae80d 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -537,7 +537,7 @@ enum {
return 0;
}
-static uint64_t start;
+static RTE_ATOMIC(uint64_t) start;
static inline int
poll_burst(void *args)
@@ -575,7 +575,7 @@ enum {
num[portid] = pkt_per_port;
}
- rte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_64((uint64_t *)(uintptr_t)&start, 1, rte_memory_order_acquire);
cur_tsc = rte_rdtsc();
while (total) {
@@ -629,9 +629,9 @@ enum {
/* only when polling first */
if (flags == SC_BURST_POLL_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_relaxed);
else
- __atomic_store_n(&start, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 0, rte_memory_order_relaxed);
/* start polling thread
* if in POLL_FIRST mode, poll once launched;
@@ -655,7 +655,7 @@ enum {
/* only when polling second */
if (flags == SC_BURST_XMIT_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_release);
/* wait for polling finished */
diff_tsc = rte_eal_wait_lcore(lcore);
diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index ce88a73..d1bf5c5 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -25,13 +25,15 @@
static uint32_t *hash_data[TOTAL_ENTRY];
static volatile uint8_t writer_done;
static volatile uint8_t all_registered;
-static volatile uint32_t thr_id;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
static struct rte_hash *h;
static char hash_name[8];
-static uint64_t updates, checks;
-static uint64_t update_cycles, check_cycles;
+static RTE_ATOMIC(uint64_t) updates;
+static RTE_ATOMIC(uint64_t) checks;
+static RTE_ATOMIC(uint64_t) update_cycles;
+static RTE_ATOMIC(uint64_t) check_cycles;
/* Scale down results to 1000 operations to support lower
* granularity clocks.
@@ -44,7 +46,7 @@
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -81,8 +83,8 @@
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
/* Make the thread offline */
rte_rcu_qsbr_thread_offline(t[0], thread_id);
@@ -113,8 +115,8 @@
} while (loop_cnt < 20000000);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -130,15 +132,15 @@
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
printf("\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\n",
num_cores - 1);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
if (all_registered == 1)
tmp_num_cores = num_cores - 1;
@@ -168,15 +170,16 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -193,10 +196,10 @@
size_t sz;
unsigned int i, tmp_num_cores;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf Test: %d Readers\n", num_cores);
@@ -220,11 +223,11 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -241,10 +244,10 @@
size_t sz;
unsigned int i;
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: %d Writers ('wait' in qsbr_check == false)\n",
num_cores);
@@ -266,10 +269,11 @@
/* Wait until all readers have exited */
rte_eal_mp_wait_lcore();
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -317,8 +321,8 @@
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
rte_rcu_qsbr_thread_unregister(temp, thread_id);
@@ -389,12 +393,12 @@ static struct rte_hash *init_hash(void)
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\n", num_cores);
@@ -453,8 +457,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
@@ -467,12 +471,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -511,7 +515,7 @@ static struct rte_hash *init_hash(void)
printf("Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\n", num_cores);
- __atomic_store_n(&thr_id, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_relaxed);
if (all_registered == 1)
tmp_num_cores = num_cores;
@@ -570,8 +574,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
/* Wait and check return value from reader threads */
@@ -583,12 +587,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -622,10 +626,10 @@ static struct rte_hash *init_hash(void)
return TEST_SKIPPED;
}
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
num_cores = 0;
RTE_LCORE_FOREACH_WORKER(core_id) {
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index d7c5a4c..6d7a0a8 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -186,7 +186,7 @@ struct thread_params {
void *burst = NULL;
#ifdef RTE_USE_C11_MEM_MODEL
- if (__atomic_fetch_add(&lcore_count, 1, __ATOMIC_RELAXED) + 1 != 2)
+ if (rte_atomic_fetch_add_explicit(&lcore_count, 1, rte_memory_order_relaxed) + 1 != 2)
#else
if (__sync_add_and_fetch(&lcore_count, 1) != 2)
#endif
@@ -320,7 +320,7 @@ struct thread_params {
return 0;
}
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static uint64_t queue_count[RTE_MAX_LCORE];
#define TIME_MS 100
@@ -342,7 +342,8 @@ struct thread_params {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (time_diff < hz * TIME_MS / 1000) {
@@ -397,12 +398,12 @@ struct thread_params {
param.r = r;
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lcore_f(&param);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_ring_stress_impl.h b/app/test/test_ring_stress_impl.h
index 2dec897..e6b23c0 100644
--- a/app/test/test_ring_stress_impl.h
+++ b/app/test/test_ring_stress_impl.h
@@ -24,7 +24,7 @@ enum {
WRK_CMD_RUN,
};
-static uint32_t wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;
+static RTE_ATOMIC(uint32_t) wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;
/* test run-time in seconds */
static const uint32_t run_time = 60;
@@ -203,7 +203,7 @@ struct ring_elem {
* really releasing any data through 'wrk_cmd' to
* the worker.
*/
- while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) != WRK_CMD_RUN)
+ while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) != WRK_CMD_RUN)
rte_pause();
cl = rte_rdtsc_precise();
@@ -246,7 +246,7 @@ struct ring_elem {
lcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);
- } while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) == WRK_CMD_RUN);
+ } while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) == WRK_CMD_RUN);
cl = rte_rdtsc_precise() - cl;
if (prcs == 0)
@@ -360,12 +360,12 @@ struct ring_elem {
}
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_RUN, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_RUN, rte_memory_order_release);
rte_delay_us(run_time * US_PER_S);
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_STOP, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_STOP, rte_memory_order_release);
/* wait for workers and collect stats. */
mc = rte_lcore_id();
diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index 5079895..f67fc35 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -35,7 +35,7 @@
static rte_rwlock_t sl;
static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
enum {
LC_TYPE_RDLOCK,
@@ -101,7 +101,8 @@ struct try_rwlock_lcore {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -134,12 +135,12 @@ struct try_rwlock_lcore {
printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(NULL);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c
index 873bd60..7455bac 100644
--- a/app/test/test_seqlock.c
+++ b/app/test/test_seqlock.c
@@ -22,7 +22,7 @@ struct data {
struct reader {
struct data *data;
- uint8_t stop;
+ RTE_ATOMIC(uint8_t) stop;
};
#define WRITER_RUNTIME 2.0 /* s */
@@ -79,7 +79,7 @@ struct reader {
struct reader *r = arg;
int rc = TEST_SUCCESS;
- while (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&
+ while (rte_atomic_load_explicit(&r->stop, rte_memory_order_relaxed) == 0 &&
rc == TEST_SUCCESS) {
struct data *data = r->data;
bool interrupted;
@@ -115,7 +115,7 @@ struct reader {
static void
reader_stop(struct reader *reader)
{
- __atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&reader->stop, 1, rte_memory_order_relaxed);
}
#define NUM_WRITERS 2 /* main lcore + one worker */
diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index c12d52d..010ab82 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -59,15 +59,15 @@ static int32_t dummy_mt_unsafe_cb(void *args)
* test, because two threads are concurrently in a non-MT safe callback.
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so clear pass flag */
*pass_test = 0;
@@ -86,15 +86,15 @@ static int32_t dummy_mt_safe_cb(void *args)
* that 2 threads are running the callback at the same time: MT safe
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so set pass flag */
*pass_test = 1;
@@ -748,15 +748,15 @@ static int32_t dummy_mt_safe_cb(void *args)
/* retrieve done flag and lock to add/sub */
uint32_t *done = &params[0];
- uint32_t *lock = &params[1];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&params[1];
while (!*done) {
- __atomic_fetch_add(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(lock, 1, rte_memory_order_relaxed);
rte_delay_us(500);
- if (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)
+ if (rte_atomic_load_explicit(lock, rte_memory_order_relaxed) > 1)
/* pass: second core has simultaneously incremented */
*done = 1;
- __atomic_fetch_sub(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(lock, 1, rte_memory_order_relaxed);
}
return 0;
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 9a481f2..a29405a 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -48,7 +48,7 @@
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_spinlock_per_core(__rte_unused void *arg)
@@ -110,7 +110,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -149,11 +150,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c
index c5e1caa..3f17a26 100644
--- a/app/test/test_stack_perf.c
+++ b/app/test/test_stack_perf.c
@@ -23,7 +23,7 @@
*/
static volatile unsigned int bulk_sizes[] = {8, MAX_BURST};
-static uint32_t lcore_barrier;
+static RTE_ATOMIC(uint32_t) lcore_barrier;
struct lcore_pair {
unsigned int c1;
@@ -143,8 +143,8 @@ struct thread_args {
s = args->s;
size = args->sz;
- __atomic_fetch_sub(&lcore_barrier, 1, __ATOMIC_RELAXED);
- rte_wait_until_equal_32(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&lcore_barrier, 1, rte_memory_order_relaxed);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&lcore_barrier, 0, rte_memory_order_relaxed);
uint64_t start = rte_rdtsc();
@@ -173,7 +173,7 @@ struct thread_args {
unsigned int i;
for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
- __atomic_store_n(&lcore_barrier, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 2, rte_memory_order_relaxed);
args[0].sz = args[1].sz = bulk_sizes[i];
args[0].s = args[1].s = s;
@@ -206,7 +206,7 @@ struct thread_args {
int cnt = 0;
double avg;
- __atomic_store_n(&lcore_barrier, n, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, n, rte_memory_order_relaxed);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
@@ -300,7 +300,7 @@ struct thread_args {
struct lcore_pair cores;
struct rte_stack *s;
- __atomic_store_n(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 0, rte_memory_order_relaxed);
s = rte_stack_create(STACK_NAME, STACK_SIZE, rte_socket_id(), flags);
if (s == NULL) {
diff --git a/app/test/test_threads.c b/app/test/test_threads.c
index 4ac3f26..6d6881a 100644
--- a/app/test/test_threads.c
+++ b/app/test/test_threads.c
@@ -6,12 +6,13 @@
#include <rte_thread.h>
#include <rte_debug.h>
+#include <rte_stdatomic.h>
#include "test.h"
RTE_LOG_REGISTER(threads_logtype_test, test.threads, INFO);
-static uint32_t thread_id_ready;
+static RTE_ATOMIC(uint32_t) thread_id_ready;
static uint32_t
thread_main(void *arg)
@@ -19,9 +20,9 @@
if (arg != NULL)
*(rte_thread_t *)arg = rte_thread_self();
- __atomic_store_n(&thread_id_ready, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 1, rte_memory_order_release);
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 1)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 1)
;
return 0;
@@ -37,13 +38,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
@@ -61,13 +62,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main,
&thread_main_id) == 0, "Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_detach(thread_id) == 0,
"Failed to detach thread.");
@@ -85,7 +86,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
priority = RTE_THREAD_PRIORITY_NORMAL;
@@ -121,7 +122,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Priority set mismatches priority get");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -137,7 +138,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset0) == 0,
@@ -190,7 +191,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes affinity thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset1) == 0,
@@ -198,7 +199,7 @@
RTE_TEST_ASSERT(memcmp(&cpuset0, &cpuset1, sizeof(rte_cpuset_t)) == 0,
"Failed to apply affinity attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -219,7 +220,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes priority thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_priority(thread_id, &priority) == 0,
@@ -227,7 +228,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Failed to apply priority attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -243,13 +244,13 @@
thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 1fbbedb..9b6b584 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -48,7 +48,7 @@
static rte_ticketlock_recursive_t tlr;
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_ticketlock_per_core(__rte_unused void *arg)
@@ -111,7 +111,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcore_count[lcore] < MAX_LOOP) {
@@ -153,11 +154,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index cac8fc0..dc15a80 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -202,7 +202,7 @@ struct mytimerinfo {
/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
-static uint16_t lcore_state[RTE_MAX_LCORE];
+static RTE_ATOMIC(uint16_t) lcore_state[RTE_MAX_LCORE];
static void
main_init_workers(void)
@@ -210,7 +210,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_WAITING, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_WAITING,
+ rte_memory_order_relaxed);
}
}
@@ -220,10 +221,12 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_RUN_SIGNAL, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL,
+ rte_memory_order_release);
}
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_RUNNING,
+ rte_memory_order_acquire);
}
}
@@ -233,7 +236,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_FINISHED,
+ rte_memory_order_acquire);
}
}
@@ -242,8 +246,10 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- rte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, __ATOMIC_ACQUIRE);
- __atomic_store_n(&lcore_state[lcore_id], WORKER_RUNNING, __ATOMIC_RELEASE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[lcore_id], WORKER_RUN_SIGNAL,
+ rte_memory_order_acquire);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING,
+ rte_memory_order_release);
}
static void
@@ -251,7 +257,8 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- __atomic_store_n(&lcore_state[lcore_id], WORKER_FINISHED, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED,
+ rte_memory_order_release);
}
@@ -277,12 +284,12 @@ struct mytimerinfo {
unsigned int lcore_id = rte_lcore_id();
unsigned int main_lcore = rte_get_main_lcore();
int32_t my_collisions = 0;
- static uint32_t collisions;
+ static RTE_ATOMIC(uint32_t) collisions;
if (lcore_id == main_lcore) {
cb_count = 0;
test_failed = 0;
- __atomic_store_n(&collisions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&collisions, 0, rte_memory_order_relaxed);
timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
if (timers == NULL) {
printf("Test Failed\n");
@@ -310,7 +317,7 @@ struct mytimerinfo {
my_collisions++;
}
if (my_collisions != 0)
- __atomic_fetch_add(&collisions, my_collisions, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&collisions, my_collisions, rte_memory_order_relaxed);
/* wait long enough for timers to expire */
rte_delay_ms(100);
@@ -324,7 +331,7 @@ struct mytimerinfo {
/* now check that we get the right number of callbacks */
if (lcore_id == main_lcore) {
- my_collisions = __atomic_load_n(&collisions, __ATOMIC_RELAXED);
+ my_collisions = rte_atomic_load_explicit(&collisions, rte_memory_order_relaxed);
if (my_collisions != 0)
printf("- %d timer reset collisions (OK)\n", my_collisions);
rte_timer_manage();
--
1.8.3.1
* [PATCH v2 42/45] app/test-eventdev: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (40 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 41/45] app/test: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 43/45] app/test-crypto-perf: " Tyler Retzlaff
` (2 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/test-eventdev/test_order_atq.c | 4 ++--
app/test-eventdev/test_order_common.c | 5 +++--
app/test-eventdev/test_order_common.h | 8 ++++----
app/test-eventdev/test_order_queue.c | 4 ++--
app/test-eventdev/test_perf_common.h | 6 +++---
5 files changed, 14 insertions(+), 13 deletions(-)
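As a reference for review, a minimal sketch of the mapping applied in this patch: a plain
counter becomes RTE_ATOMIC()-qualified and the __atomic_* builtins become their
rte_atomic_*_explicit() counterparts. The names below are illustrative, not taken from the
diff, and the sketch only assumes the <rte_stdatomic.h> header used throughout this series.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Hypothetical counter, standing in for fields such as outstand_pkts. */
static RTE_ATOMIC(uint64_t) outstanding;

static inline void
producer_account(uint64_t n)
{
        /* was: __atomic_fetch_add(&outstanding, n, __ATOMIC_RELAXED); */
        rte_atomic_fetch_add_explicit(&outstanding, n, rte_memory_order_relaxed);
}

static inline int
consumer_drained(void)
{
        /* was: __atomic_load_n(&outstanding, __ATOMIC_RELAXED) == 0 */
        return rte_atomic_load_explicit(&outstanding, rte_memory_order_relaxed) == 0;
}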
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 2fee4b4..128d3f2 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index a9894c6..0fceace 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -189,7 +189,7 @@
evt_err("failed to allocate t->expected_flow_seq memory");
goto exp_nomem;
}
- __atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts, rte_memory_order_relaxed);
t->err = false;
t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
@@ -296,7 +296,8 @@
while (t->err == false) {
uint64_t new_cycles = rte_get_timer_cycles();
- int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
+ int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
+ rte_memory_order_relaxed);
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 1507265..65878d1 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -48,7 +48,7 @@ struct test_order {
* The atomic_* is an expensive operation,Since it is a functional test,
* We are using the atomic_ operation to reduce the code complexity.
*/
- uint64_t outstand_pkts;
+ RTE_ATOMIC(uint64_t) outstand_pkts;
enum evt_test_result result;
uint32_t nb_flows;
uint64_t nb_pkts;
@@ -95,7 +95,7 @@ struct test_order {
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
- uint64_t *const outstand_pkts)
+ RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
/* compare the seqn against expected value */
@@ -113,7 +113,7 @@ struct test_order {
*/
expected_flow_seq[flow]++;
rte_pktmbuf_free(ev->mbuf);
- __atomic_fetch_sub(outstand_pkts, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -132,7 +132,7 @@ struct test_order {
const uint8_t port = w->port_id;\
const uint32_t nb_flows = t->nb_flows;\
uint32_t *expected_flow_seq = t->expected_flow_seq;\
- uint64_t *outstand_pkts = &t->outstand_pkts;\
+ RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
if (opt->verbose_level > 1)\
printf("%s(): lcore %d dev_id %d port=%d\n",\
__func__, rte_lcore_id(), dev_id, port)
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 80eaea5..a282ab2 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 2b4f572..7f7c823 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -225,7 +225,7 @@ struct perf_elt {
* stored before updating the number of
* processed packets for worker lcores
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -270,7 +270,7 @@ struct perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -325,7 +325,7 @@ struct perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts += vec->nb_elem;
if (enable_fwd_latency) {
--
1.8.3.1
* [PATCH v2 43/45] app/test-crypto-perf: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (41 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 42/45] app/test-eventdev: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 44/45] app/test-compress-perf: " Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 45/45] app/test-bbdev: " Tyler Retzlaff
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/test-crypto-perf/cperf_test_latency.c | 6 +++---
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +++++-----
app/test-crypto-perf/cperf_test_throughput.c | 10 +++++-----
app/test-crypto-perf/cperf_test_verify.c | 10 +++++-----
4 files changed, 18 insertions(+), 18 deletions(-)
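The conversions in this patch are all instances of the "print the header exactly once"
pattern; a hedged sketch of that pattern after conversion is shown below (function name
and output string are illustrative, not taken from the diff).

#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

/* Hypothetical one-shot flag, same shape as the display_once flags below. */
static RTE_ATOMIC(uint16_t) display_once;

static void
print_header_once(void)
{
        uint16_t exp = 0;

        /* The strong CAS replaces __atomic_compare_exchange_n(); the old
         * explicit 'weak' argument (0) has no counterpart and is dropped.
         */
        if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
                        rte_memory_order_relaxed, rte_memory_order_relaxed))
                printf("header printed by exactly one lcore\n");
}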
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 99b7d7c..b8ad6bf 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -136,7 +136,7 @@ struct priv_op_data {
uint32_t imix_idx = 0;
int ret = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
if (ctx == NULL)
return 0;
@@ -341,8 +341,8 @@ struct priv_op_data {
uint16_t exp = 0;
if (ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
"cycles, time (us)");
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 4a60f6d..7191d99 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -396,7 +396,7 @@ struct pmd_cyclecount_state {
state.lcore = rte_lcore_id();
state.linearize = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
static bool warmup = true;
/*
@@ -443,8 +443,8 @@ struct pmd_cyclecount_state {
uint16_t exp = 0;
if (!opts->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
@@ -460,8 +460,8 @@ struct pmd_cyclecount_state {
state.cycles_per_enq,
state.cycles_per_deq);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index e3d266d..c0891e7 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -107,7 +107,7 @@ struct cperf_throughput_ctx {
uint8_t burst_size_idx = 0;
uint32_t imix_idx = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
@@ -277,8 +277,8 @@ struct cperf_throughput_ctx {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst Size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -298,8 +298,8 @@ struct cperf_throughput_ctx {
throughput_gbps,
cycles_per_packet);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 3548509..222c7a1 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -216,7 +216,7 @@ struct cperf_op_result {
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
uint64_t ops_failed = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint64_t i;
uint16_t ops_unused = 0;
@@ -370,8 +370,8 @@ struct cperf_op_result {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -388,8 +388,8 @@ struct cperf_op_result {
ops_deqd_failed,
ops_failed);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore id, Buffer Size(B), "
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Failed Ops\n");
--
1.8.3.1
* [PATCH v2 44/45] app/test-compress-perf: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (42 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 43/45] app/test-crypto-perf: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
2024-03-21 19:17 ` [PATCH v2 45/45] app/test-bbdev: " Tyler Retzlaff
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 ++--
app/test-compress-perf/comp_perf_test_throughput.c | 10 +++++-----
app/test-compress-perf/comp_perf_test_verify.c | 6 +++---
4 files changed, 11 insertions(+), 11 deletions(-)
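This patch also marks a struct member with RTE_ATOMIC() so it can be passed to the
rte_atomic_*_explicit() helpers; a minimal sketch, with hypothetical struct and function
names, follows.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Hypothetical struct, same shape as cperf_mem_resources::print_info_once. */
struct shared_state {
        uint16_t qp_id;
        RTE_ATOMIC(uint16_t) print_info_once; /* was: uint16_t print_info_once; */
};

/* Returns 1 for the single caller that wins the right to print, else 0. */
static int
claim_print_slot(struct shared_state *mem)
{
        uint16_t exp = 0;

        return rte_atomic_compare_exchange_strong_explicit(&mem->print_info_once,
                        &exp, 1, rte_memory_order_relaxed, rte_memory_order_relaxed);
}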
diff --git a/app/test-compress-perf/comp_perf_test_common.h b/app/test-compress-perf/comp_perf_test_common.h
index d039e5a..085e269 100644
--- a/app/test-compress-perf/comp_perf_test_common.h
+++ b/app/test-compress-perf/comp_perf_test_common.h
@@ -14,7 +14,7 @@ struct cperf_mem_resources {
uint16_t qp_id;
uint8_t lcore_id;
- uint16_t print_info_once;
+ RTE_ATOMIC(uint16_t) print_info_once;
uint32_t total_bufs;
uint8_t *compressed_data;
diff --git a/app/test-compress-perf/comp_perf_test_cyclecount.c b/app/test-compress-perf/comp_perf_test_cyclecount.c
index 4d336ec..64e8faa 100644
--- a/app/test-compress-perf/comp_perf_test_cyclecount.c
+++ b/app/test-compress-perf/comp_perf_test_cyclecount.c
@@ -498,8 +498,8 @@ struct cperf_cyclecount_ctx {
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
diff --git a/app/test-compress-perf/comp_perf_test_throughput.c b/app/test-compress-perf/comp_perf_test_throughput.c
index 1f7072d..089d19c 100644
--- a/app/test-compress-perf/comp_perf_test_throughput.c
+++ b/app/test-compress-perf/comp_perf_test_throughput.c
@@ -336,7 +336,7 @@
struct cperf_benchmark_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->ver.options;
uint32_t lcore = rte_lcore_id();
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
int i, ret = EXIT_SUCCESS;
ctx->ver.mem.lcore_id = lcore;
@@ -345,8 +345,8 @@
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
@@ -413,8 +413,8 @@
}
exp = 0;
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("\n%12s%6s%12s%17s%15s%16s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]",
"Comp [Gbps]", "Decomp [Gbps]");
diff --git a/app/test-compress-perf/comp_perf_test_verify.c b/app/test-compress-perf/comp_perf_test_verify.c
index 7bd1807..09d97c5 100644
--- a/app/test-compress-perf/comp_perf_test_verify.c
+++ b/app/test-compress-perf/comp_perf_test_verify.c
@@ -396,7 +396,7 @@
struct cperf_verify_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->options;
int ret = EXIT_SUCCESS;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint32_t lcore = rte_lcore_id();
uint16_t exp = 0;
@@ -452,8 +452,8 @@
test_data->input_data_sz * 100;
if (!ctx->silent) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("%12s%6s%12s%17s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]");
}
--
1.8.3.1
* [PATCH v2 45/45] app/test-bbdev: use rte stdatomic API
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
` (43 preceding siblings ...)
2024-03-21 19:17 ` [PATCH v2 44/45] app/test-compress-perf: " Tyler Retzlaff
@ 2024-03-21 19:17 ` Tyler Retzlaff
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-21 19:17 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++++++++++++----------------
1 file changed, 110 insertions(+), 73 deletions(-)
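The bulk of this patch converts the dequeue-accounting counters touched from the
interrupt callback; a simplified sketch of that pattern after conversion (struct, function
names and the failure value are illustrative, not taken from the diff) follows.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Hypothetical per-thread state, same shape as struct thread_params below. */
struct worker_state {
        RTE_ATOMIC(uint16_t) nb_dequeued;      /* was: uint16_t nb_dequeued; */
        RTE_ATOMIC(int16_t) processing_status; /* was: int16_t processing_status; */
};

static void
account_dequeue(struct worker_state *w, uint16_t deq, uint16_t burst_sz)
{
        if (deq < burst_sz) {
                /* was: __atomic_store_n(&w->processing_status, -1, __ATOMIC_RELAXED); */
                rte_atomic_store_explicit(&w->processing_status, -1,
                                rte_memory_order_relaxed);
                return;
        }
        /* was: __atomic_fetch_add(&w->nb_dequeued, deq, __ATOMIC_RELAXED); */
        rte_atomic_fetch_add_explicit(&w->nb_dequeued, deq, rte_memory_order_relaxed);
}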
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index dcce00a..9694ed3 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -144,7 +144,7 @@ struct test_op_params {
uint16_t num_to_process;
uint16_t num_lcores;
int vector_mask;
- uint16_t sync;
+ RTE_ATOMIC(uint16_t) sync;
struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
@@ -159,9 +159,9 @@ struct thread_params {
uint8_t iter_count;
double iter_average;
double bler;
- uint16_t nb_dequeued;
- int16_t processing_status;
- uint16_t burst_sz;
+ RTE_ATOMIC(uint16_t) nb_dequeued;
+ RTE_ATOMIC(int16_t) processing_status;
+ RTE_ATOMIC(uint16_t) burst_sz;
struct test_op_params *op_params;
struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
@@ -3195,56 +3195,64 @@ typedef int (test_case_function)(struct active_device *ad,
}
if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
printf(
"Dequeue interrupt handler called for incorrect event!\n");
return;
}
- burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);
+ burst_sz = rte_atomic_load_explicit(&tp->burst_sz, rte_memory_order_relaxed);
num_ops = tp->op_params->num_to_process;
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
deq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,
&tp->fft_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
deq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
&tp->mldts_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else /*RTE_BBDEV_OP_TURBO_ENC*/
deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
if (deq < burst_sz) {
printf(
"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
burst_sz, deq);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
- if (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) + deq < num_ops) {
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
return;
}
@@ -3288,7 +3296,8 @@ typedef int (test_case_function)(struct active_device *ad,
if (ret) {
printf("Buffers validation failed\n");
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
}
switch (test_vector.op_type) {
@@ -3315,7 +3324,8 @@ typedef int (test_case_function)(struct active_device *ad,
break;
default:
printf("Unknown op type: %d\n", test_vector.op_type);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
@@ -3324,7 +3334,7 @@ typedef int (test_case_function)(struct active_device *ad,
tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
((double)total_time / (double)rte_get_tsc_hz());
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
}
static int
@@ -3362,10 +3372,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3415,15 +3426,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3459,10 +3472,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3506,15 +3520,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3549,10 +3565,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3592,15 +3609,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3636,10 +3655,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3681,15 +3701,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3725,10 +3747,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3769,15 +3792,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3811,10 +3836,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_to_process);
@@ -3851,15 +3877,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3894,7 +3922,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4013,7 +4042,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4148,7 +4178,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4271,7 +4302,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4402,7 +4434,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4503,7 +4536,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4604,7 +4638,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4702,7 +4737,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4898,7 +4934,7 @@ typedef int (test_case_function)(struct active_device *ad,
else
return TEST_SKIPPED;
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -4921,7 +4957,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = bler_function(&t_params[0]);
/* Main core is always used */
@@ -5024,7 +5060,7 @@ typedef int (test_case_function)(struct active_device *ad,
throughput_function = throughput_pmd_lcore_enc;
}
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -5047,7 +5083,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = throughput_function(&t_params[0]);
/* Main core is always used */
@@ -5077,29 +5113,30 @@ typedef int (test_case_function)(struct active_device *ad,
* Wait for main lcore operations.
*/
tp = &t_params[0];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+ (rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed) !=
TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed);
/* Wait for worker lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp = &t_params[used_cores];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
- TEST_FAILED))
+ (rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed) != TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed);
}
/* Print throughput if test passed */
--
1.8.3.1
* [PATCH v3 00/45] use stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (47 preceding siblings ...)
2024-03-21 19:16 ` [PATCH v2 00/45] " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 01/45] net/mlx5: use rte " Tyler Retzlaff
` (45 more replies)
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (2 subsequent siblings)
51 siblings, 46 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
This series converts all non-generic built-in atomics to use the rte_atomic
macros that allow optional enablement of standard C11 atomics.
Use of generic atomics for non-scalar types is not converted in this
change and will be evaluated as part of a separate series.
Note: if this series ends up requiring too much rebasing due to tree
churn before it is merged, I will break it up into smaller series.
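For reviewers, a minimal sketch of the conversion pattern applied
throughout the series; the struct and function names below are made up
for illustration and only APIs already used by the series appear:

#include <stdint.h>

#include <rte_stdatomic.h>

/* Hypothetical object with an atomically maintained reference count. */
struct example_obj {
	RTE_ATOMIC(uint32_t) refcnt;	/* was: uint32_t refcnt; */
};

static inline void
example_obj_get(struct example_obj *obj)
{
	/* was: __atomic_fetch_add(&obj->refcnt, 1, __ATOMIC_RELAXED); */
	rte_atomic_fetch_add_explicit(&obj->refcnt, 1,
				      rte_memory_order_relaxed);
}

static inline int
example_obj_put(struct example_obj *obj)
{
	/*
	 * was: __atomic_fetch_sub(&obj->refcnt, 1, __ATOMIC_RELAXED);
	 * returns 1 when the last reference has been dropped.
	 */
	return rte_atomic_fetch_sub_explicit(&obj->refcnt, 1,
					     rte_memory_order_relaxed) == 1;
}

As I understand the macros, RTE_ATOMIC(T) expands to an atomic-qualified
type when standard C11 atomics are enabled and to plain T otherwise, so
the call sites stay the same either way.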
v3:
* event/dsw, wrap all lines <= 80 chars, align arguments to
opening parenthesis.
* event/dlb2, wrap changed lines <= 80 chars, remove comments
referencing gcc __atomic built-ins.
* bus/vmbus, remove comment referencing gcc atomic built-ins,
fix mistake where monitor_mask was declared RTE_ATOMIC(uint32_t),
fix mistake where pending was not declared RTE_ATOMIC(uint32_t),
remove now unnecessary cast to __rte_atomic of pending (since
the field is now properly declared RTE_ATOMIC); see the short
sketch after the v2 notes below.
v2:
* drop the net/sfc driver from the series. The sfc driver
uses the generic __atomic_store, which is not handled by the
current macros. The cases where generic __atomic_xxx are used on
objects that can't be accepted by __atomic_xxx_n will be
addressed in a separate series.
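As referenced in the bus/vmbus note above, a minimal sketch of the
declaration rule (hypothetical struct, not the vmbus code): only fields
that are actually accessed through rte_atomic_*() take the RTE_ATOMIC()
specifier, which is also what makes casts at the call sites unnecessary.

#include <stdint.h>

#include <rte_stdatomic.h>

struct example_chan {
	uint32_t monitor_like;		/* plain field, never accessed atomically */
	RTE_ATOMIC(uint32_t) pending;	/* accessed through rte_atomic_*() */
};

static void
example_mark_pending(struct example_chan *c)
{
	/* No cast needed: 'pending' is declared RTE_ATOMIC(uint32_t). */
	rte_atomic_store_explicit(&c->pending, 1, rte_memory_order_relaxed);
}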
Tyler Retzlaff (45):
net/mlx5: use rte stdatomic API
net/ixgbe: use rte stdatomic API
net/iavf: use rte stdatomic API
net/ice: use rte stdatomic API
net/i40e: use rte stdatomic API
net/hns3: use rte stdatomic API
net/bnxt: use rte stdatomic API
net/cpfl: use rte stdatomic API
net/af_xdp: use rte stdatomic API
net/octeon_ep: use rte stdatomic API
net/octeontx: use rte stdatomic API
net/cxgbe: use rte stdatomic API
net/gve: use rte stdatomic API
net/memif: use rte stdatomic API
net/thunderx: use rte stdatomic API
net/virtio: use rte stdatomic API
net/hinic: use rte stdatomic API
net/idpf: use rte stdatomic API
net/qede: use rte stdatomic API
net/ring: use rte stdatomic API
vdpa/mlx5: use rte stdatomic API
raw/ifpga: use rte stdatomic API
event/opdl: use rte stdatomic API
event/octeontx: use rte stdatomic API
event/dsw: use rte stdatomic API
dma/skeleton: use rte stdatomic API
crypto/octeontx: use rte stdatomic API
common/mlx5: use rte stdatomic API
common/idpf: use rte stdatomic API
common/iavf: use rte stdatomic API
baseband/acc: use rte stdatomic API
net/txgbe: use rte stdatomic API
net/null: use rte stdatomic API
event/dlb2: use rte stdatomic API
dma/idxd: use rte stdatomic API
crypto/ccp: use rte stdatomic API
common/cpt: use rte stdatomic API
bus/vmbus: use rte stdatomic API
examples: use rte stdatomic API
app/dumpcap: use rte stdatomic API
app/test: use rte stdatomic API
app/test-eventdev: use rte stdatomic API
app/test-crypto-perf: use rte stdatomic API
app/test-compress-perf: use rte stdatomic API
app/test-bbdev: use rte stdatomic API
app/dumpcap/main.c | 12 +-
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++--------
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 +-
app/test-compress-perf/comp_perf_test_throughput.c | 10 +-
app/test-compress-perf/comp_perf_test_verify.c | 6 +-
app/test-crypto-perf/cperf_test_latency.c | 6 +-
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +-
app/test-crypto-perf/cperf_test_throughput.c | 10 +-
app/test-crypto-perf/cperf_test_verify.c | 10 +-
app/test-eventdev/test_order_atq.c | 4 +-
app/test-eventdev/test_order_common.c | 5 +-
app/test-eventdev/test_order_common.h | 8 +-
app/test-eventdev/test_order_queue.c | 4 +-
app/test-eventdev/test_perf_common.h | 6 +-
app/test/test_bpf.c | 46 ++++--
app/test/test_distributor.c | 114 ++++++-------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++--
app/test/test_hash_multiwriter.c | 16 +-
app/test/test_hash_readwrite.c | 74 ++++-----
app/test/test_hash_readwrite_lf_perf.c | 88 +++++-----
app/test/test_lcores.c | 25 +--
app/test/test_lpm_perf.c | 14 +-
app/test/test_mcslock.c | 12 +-
app/test/test_mempool_perf.c | 9 +-
app/test/test_pflock.c | 13 +-
app/test/test_pmd_perf.c | 10 +-
app/test/test_rcu_qsbr_perf.c | 114 ++++++-------
app/test/test_ring_perf.c | 11 +-
app/test/test_ring_stress_impl.h | 10 +-
app/test/test_rwlock.c | 9 +-
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +--
app/test/test_spinlock.c | 9 +-
app/test/test_stack_perf.c | 12 +-
app/test/test_threads.c | 33 ++--
app/test/test_ticketlock.c | 9 +-
app/test/test_timer.c | 31 ++--
drivers/baseband/acc/rte_acc100_pmd.c | 36 ++--
drivers/baseband/acc/rte_vrb_pmd.c | 46 ++++--
drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
drivers/bus/vmbus/vmbus_channel.c | 8 +-
drivers/common/cpt/cpt_common.h | 2 +-
drivers/common/iavf/iavf_impl.c | 4 +-
drivers/common/idpf/idpf_common_device.h | 6 +-
drivers/common/idpf/idpf_common_rxtx.c | 14 +-
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 +-
drivers/common/mlx5/linux/mlx5_nl.c | 5 +-
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 +-
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 ++--
drivers/common/mlx5/mlx5_common_utils.h | 6 +-
drivers/common/mlx5/mlx5_malloc.c | 58 +++----
drivers/crypto/ccp/ccp_dev.c | 8 +-
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 +-
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +-
drivers/dma/skeleton/skeleton_dmadev.c | 5 +-
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
drivers/event/dlb2/dlb2.c | 34 ++--
drivers/event/dlb2/dlb2_priv.h | 15 +-
drivers/event/dlb2/dlb2_xstats.c | 2 +-
drivers/event/dsw/dsw_evdev.h | 6 +-
drivers/event/dsw/dsw_event.c | 47 ++++--
drivers/event/dsw/dsw_xstats.c | 4 +-
drivers/event/octeontx/timvf_evdev.h | 8 +-
drivers/event/octeontx/timvf_worker.h | 36 ++--
drivers/event/opdl/opdl_ring.c | 80 ++++-----
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 ++-
drivers/net/bnxt/bnxt_cpr.h | 4 +-
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 +-
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 +-
drivers/net/cpfl/cpfl_ethdev.c | 8 +-
drivers/net/cxgbe/clip_tbl.c | 12 +-
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 +--
drivers/net/cxgbe/cxgbe_ofld.h | 6 +-
drivers/net/cxgbe/l2t.c | 12 +-
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +--
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 +-
drivers/net/cxgbe/smt.h | 2 +-
drivers/net/gve/base/gve_osdep.h | 4 +-
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
drivers/net/hns3/hns3_cmd.c | 18 +-
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 ++--
drivers/net/hns3/hns3_ethdev.h | 32 ++--
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++----
drivers/net/hns3/hns3_intr.c | 36 ++--
drivers/net/hns3/hns3_intr.h | 4 +-
drivers/net/hns3/hns3_mbx.c | 6 +-
drivers/net/hns3/hns3_mp.c | 6 +-
drivers/net/hns3/hns3_rxtx.c | 10 +-
drivers/net/hns3/hns3_tm.c | 4 +-
drivers/net/i40e/i40e_ethdev.c | 4 +-
drivers/net/i40e/i40e_rxtx.c | 6 +-
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf.h | 16 +-
drivers/net/iavf/iavf_rxtx.c | 4 +-
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +-
drivers/net/ice/base/ice_osdep.h | 4 +-
drivers/net/ice/ice_dcf.c | 6 +-
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 +-
drivers/net/ice/ice_dcf_parent.c | 16 +-
drivers/net/ice/ice_ethdev.c | 12 +-
drivers/net/ice/ice_ethdev.h | 2 +-
drivers/net/idpf/idpf_ethdev.c | 7 +-
drivers/net/ixgbe/ixgbe_ethdev.c | 14 +-
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 +-
drivers/net/memif/memif.h | 4 +-
drivers/net/memif/rte_eth_memif.c | 50 +++---
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 +-
drivers/net/mlx5/mlx5.c | 9 +-
drivers/net/mlx5/mlx5.h | 66 ++++----
drivers/net/mlx5/mlx5_flow.c | 37 +++--
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 +++--
drivers/net/mlx5/mlx5_flow_dv.c | 126 +++++++-------
drivers/net/mlx5/mlx5_flow_flex.c | 14 +-
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++----
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++--
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++--
drivers/net/mlx5/mlx5_hws_cnt.c | 71 ++++----
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +-
drivers/net/mlx5/mlx5_rx.h | 14 +-
drivers/net/mlx5/mlx5_rxq.c | 30 ++--
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 +-
drivers/net/mlx5/mlx5_txpp.c | 84 +++++-----
drivers/net/mlx5/mlx5_txq.c | 12 +-
drivers/net/mlx5/mlx5_utils.c | 10 +-
drivers/net/mlx5/mlx5_utils.h | 4 +-
drivers/net/null/rte_eth_null.c | 12 +-
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +-
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +-
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx2_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx_ep_common.h | 4 +-
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 +-
drivers/net/octeontx/octeontx_ethdev.c | 8 +-
drivers/net/qede/base/bcm_osal.c | 6 +-
drivers/net/ring/rte_eth_ring.c | 8 +-
drivers/net/thunderx/nicvf_rxtx.c | 9 +-
drivers/net/thunderx/nicvf_struct.h | 4 +-
drivers/net/txgbe/txgbe_ethdev.c | 12 +-
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
drivers/net/virtio/virtio_ring.h | 4 +-
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 +-
drivers/net/virtio/virtqueue.h | 32 ++--
drivers/raw/ifpga/ifpga_rawdev.c | 9 +-
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +--
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +-
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++---
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 +-
examples/bbdev_app/main.c | 13 +-
examples/l2fwd-event/l2fwd_common.h | 4 +-
examples/l2fwd-event/l2fwd_event.c | 24 +--
examples/l2fwd-jobstats/main.c | 11 +-
.../client_server_mp/mp_server/main.c | 6 +-
examples/server_node_efd/efd_server/main.c | 6 +-
examples/vhost/main.c | 32 ++--
examples/vhost/main.h | 4 +-
examples/vhost/virtio_net.c | 13 +-
examples/vhost_blk/vhost_blk.c | 8 +-
examples/vm_power_manager/channel_monitor.c | 9 +-
180 files changed, 1643 insertions(+), 1500 deletions(-)
--
1.8.3.1
* [PATCH v3 01/45] net/mlx5: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 02/45] net/ixgbe: " Tyler Retzlaff
` (44 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
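A minimal sketch (illustrative only, not code from this patch) of the
compare-exchange mapping used below; note the 'weak' boolean argument of
__atomic_compare_exchange_n() has no counterpart because
rte_atomic_compare_exchange_strong_explicit() is always the strong
variant:

#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) example_once;

static bool
example_claim_once(void)
{
	uint32_t expected = 0;

	/*
	 * was: __atomic_compare_exchange_n(&example_once, &expected, 1,
	 *				    false (weak), __ATOMIC_RELAXED,
	 *				    __ATOMIC_RELAXED);
	 */
	return rte_atomic_compare_exchange_strong_explicit(&example_once,
			&expected, 1, rte_memory_order_relaxed,
			rte_memory_order_relaxed);
}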
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 ++-
drivers/net/mlx5/mlx5.c | 9 ++-
drivers/net/mlx5/mlx5.h | 66 ++++++++---------
drivers/net/mlx5/mlx5_flow.c | 37 +++++-----
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 ++++++-----
drivers/net/mlx5/mlx5_flow_dv.c | 126 ++++++++++++++++----------------
drivers/net/mlx5/mlx5_flow_flex.c | 14 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++++++++-------
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++++----
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++++----
drivers/net/mlx5/mlx5_hws_cnt.c | 71 +++++++++---------
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +--
drivers/net/mlx5/mlx5_rx.h | 14 ++--
drivers/net/mlx5/mlx5_rxq.c | 30 ++++----
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 ++---
drivers/net/mlx5/mlx5_txpp.c | 84 ++++++++++-----------
drivers/net/mlx5/mlx5_txq.c | 12 +--
drivers/net/mlx5/mlx5_utils.c | 10 +--
drivers/net/mlx5/mlx5_utils.h | 4 +-
22 files changed, 351 insertions(+), 345 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 40ea9d2..70bba6c 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -1918,9 +1918,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
return -ENOTSUP;
}
/* Check there is no concurrent mapping in other thread. */
- if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
- base, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
+ base,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
return 0;
}
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index b54f3cc..63da8f4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1117,7 +1117,7 @@
return 0;
}
/* Only need to check refcnt, 0 after "sh" is allocated. */
- if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+ if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
priv->lb_used = 1;
return 0;
@@ -1163,7 +1163,7 @@
claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
sh->self_lb.ibv_cq = NULL;
}
- __atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
return -rte_errno;
#else
RTE_SET_USED(dev);
@@ -1186,8 +1186,9 @@
if (!priv->lb_used)
return;
- MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
- if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
+ MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
+ if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
if (sh->self_lb.qp) {
claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
sh->self_lb.qp = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d1a6382..2ff94db 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -855,8 +855,8 @@
ct_pool = mng->pools[idx];
for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
ct = &ct_pool->actions[i];
- val = __atomic_fetch_sub(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
MLX5_ASSERT(val == 1);
if (val > 1)
cnt++;
@@ -1082,7 +1082,8 @@
DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
return -ENOTSUP;
}
- if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
+ if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
+ rte_memory_order_relaxed) + 1 > 1)
return 0;
priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
@@ -1173,7 +1174,7 @@
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
- if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
return;
mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
mlx5_free(fp->flex.devx_fp);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0091a24..77c84b8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -378,7 +378,7 @@ struct mlx5_drop {
struct mlx5_lb_ctx {
struct ibv_qp *qp; /* QP object. */
void *ibv_cq; /* Completion queue. */
- uint16_t refcnt; /* Reference count for representors. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
/* HW steering queue job descriptor type. */
@@ -481,10 +481,10 @@ enum mlx5_counter_type {
/* Counter age parameter. */
struct mlx5_age_param {
- uint16_t state; /**< Age state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
uint16_t port_id; /**< Port id of the counter. */
uint32_t timeout:24; /**< Aging timeout in seconds. */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/**< Time in seconds since last hit (atomically accessed). */
void *context; /**< Flow counter age context. */
};
@@ -497,7 +497,7 @@ struct flow_counter_stats {
/* Shared counters information for counters. */
struct mlx5_flow_counter_shared {
union {
- uint32_t refcnt; /* Only for shared action management. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
uint32_t id; /* User counter ID for legacy sharing. */
};
};
@@ -588,7 +588,7 @@ struct mlx5_counter_stats_raw {
/* Counter global management structure. */
struct mlx5_flow_counter_mng {
- volatile uint16_t n_valid; /* Number of valid pools. */
+ volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
uint16_t last_pool_idx; /* Last used pool index */
int min_id; /* The minimum counter ID in the pools. */
int max_id; /* The maximum counter ID in the pools. */
@@ -654,7 +654,7 @@ struct mlx5_aso_sq {
struct mlx5_aso_age_action {
LIST_ENTRY(mlx5_aso_age_action) next;
void *dr_action;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
/* Following fields relevant only when action is active. */
uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
struct mlx5_age_param age_params;
@@ -688,7 +688,7 @@ struct mlx5_geneve_tlv_option_resource {
rte_be16_t option_class; /* geneve tlv opt class.*/
uint8_t option_type; /* geneve tlv opt type.*/
uint8_t length; /* geneve tlv opt length. */
- uint32_t refcnt; /* geneve tlv object reference counter */
+ RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
};
@@ -903,7 +903,7 @@ struct mlx5_flow_meter_policy {
uint16_t group;
/* The group. */
rte_spinlock_t sl;
- uint32_t ref_cnt;
+ RTE_ATOMIC(uint32_t) ref_cnt;
/* Use count. */
struct rte_flow_pattern_template *hws_item_templ;
/* Hardware steering item templates. */
@@ -1038,7 +1038,7 @@ struct mlx5_flow_meter_profile {
struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
/**< srtcm_rfc2697 struct. */
};
- uint32_t ref_cnt; /**< Use count. */
+ RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
uint32_t g_support:1; /**< If G color will be generated. */
uint32_t y_support:1; /**< If Y color will be generated. */
uint32_t initialized:1; /**< Initialized. */
@@ -1078,7 +1078,7 @@ struct mlx5_aso_mtr {
enum mlx5_aso_mtr_type type;
struct mlx5_flow_meter_info fm;
/**< Pointer to the next aso flow meter structure. */
- uint8_t state; /**< ASO flow meter state. */
+ RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
uint32_t offset;
enum rte_color init_color;
};
@@ -1124,7 +1124,7 @@ struct mlx5_flow_mtr_mng {
/* Default policy table. */
uint32_t def_policy_id;
/* Default policy id. */
- uint32_t def_policy_ref_cnt;
+ RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
/** def_policy meter use count. */
struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
/* Meter drop table. */
@@ -1197,8 +1197,8 @@ struct mlx5_txpp_wq {
/* Tx packet pacing internal timestamp. */
struct mlx5_txpp_ts {
- uint64_t ci_ts;
- uint64_t ts;
+ RTE_ATOMIC(uint64_t) ci_ts;
+ RTE_ATOMIC(uint64_t) ts;
};
/* Tx packet pacing structure. */
@@ -1221,12 +1221,12 @@ struct mlx5_dev_txpp {
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
/* Statistics counters. */
- uint64_t err_miss_int; /* Missed service interrupt. */
- uint64_t err_rearm_queue; /* Rearm Queue errors. */
- uint64_t err_clock_queue; /* Clock Queue errors. */
- uint64_t err_ts_past; /* Timestamp in the past. */
- uint64_t err_ts_future; /* Timestamp in the distant future. */
- uint64_t err_ts_order; /* Timestamp not in ascending order. */
+ RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
+ RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
+ RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
+ RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
+ RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
+ RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
};
/* Sample ID information of eCPRI flex parser structure. */
@@ -1287,16 +1287,16 @@ struct mlx5_aso_ct_action {
void *dr_action_orig;
/* General action object for reply dir. */
void *dr_action_rply;
- uint32_t refcnt; /* Action used count in device flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */
uint16_t peer; /* The only peer port index could also use this CT. */
- enum mlx5_aso_ct_state state; /* ASO CT state. */
+ RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
bool is_original; /* The direction of the DR action to be used. */
};
/* CT action object state update. */
#define MLX5_ASO_CT_UPDATE_STATE(c, s) \
- __atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+ rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -1370,7 +1370,7 @@ struct mlx5_flex_pattern_field {
/* Port flex item context. */
struct mlx5_flex_item {
struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
- uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
uint32_t mapnum; /* Number of pattern translation entries. */
struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
@@ -1383,7 +1383,7 @@ struct mlx5_flex_item {
#define MLX5_SRV6_SAMPLE_NUM 5
/* Mlx5 internal flex parser profile structure. */
struct mlx5_internal_flex_parser_profile {
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
struct mlx5_flex_item flex; /* Hold map info for modify field. */
};
@@ -1512,9 +1512,9 @@ struct mlx5_dev_ctx_shared {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
#endif
- struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
- struct mlx5_hlist *modify_cmds;
- struct mlx5_hlist *tag_table;
+ RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
+ RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
+ RTE_ATOMIC(struct mlx5_hlist *) tag_table;
struct mlx5_list *port_id_action_list; /* Port ID action list. */
struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
struct mlx5_list *sample_action_list; /* List of sample actions. */
@@ -1525,7 +1525,7 @@ struct mlx5_dev_ctx_shared {
/* SW steering counters management structure. */
void *default_miss_action; /* Default miss action. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
- struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
+ RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
/* Shared interrupt handler section. */
struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
@@ -1570,7 +1570,7 @@ struct mlx5_dev_ctx_shared {
* Caution, secondary process may rebuild the struct during port start.
*/
struct mlx5_proc_priv {
- void *hca_bar;
+ RTE_ATOMIC(void *) hca_bar;
/* Mapped HCA PCI BAR area. */
size_t uar_table_sz;
/* Size of UAR register table. */
@@ -1635,7 +1635,7 @@ struct mlx5_rxq_obj {
/* Indirection table. */
struct mlx5_ind_table_obj {
LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
union {
void *ind_table; /**< Indirection table. */
struct mlx5_devx_obj *rqt; /* DevX RQT object. */
@@ -1826,7 +1826,7 @@ enum mlx5_quota_state {
};
struct mlx5_quota {
- uint8_t state; /* object state */
+ RTE_ATOMIC(uint8_t) state; /* object state */
uint8_t mode; /* metering mode */
/**
* Keep track of application update types.
@@ -1955,7 +1955,7 @@ struct mlx5_priv {
uint32_t flex_item_map; /* Map of allocated flex item elements. */
uint32_t nb_queue; /* HW steering queue number. */
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
- uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
+ RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -2007,7 +2007,7 @@ struct mlx5_priv {
#endif
struct rte_eth_dev *shared_host; /* Host device for HW steering. */
- uint16_t shared_refcnt; /* HW steering host reference counter. */
+ RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f31fdfb..1954975 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4623,8 +4623,8 @@ struct mlx5_translated_action_handle {
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
- __atomic_fetch_add(&shared_rss->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
+ rte_memory_order_relaxed);
return idx;
default:
break;
@@ -7459,7 +7459,7 @@ struct mlx5_list_entry *
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
- __atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
mlx5_free(default_miss_ctx.queue);
}
mlx5_flow_pop_thread_workspace();
@@ -7470,10 +7470,10 @@ struct mlx5_list_entry *
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
if (rss_desc->shared_rss)
- __atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
+ rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
+ rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
@@ -7976,7 +7976,8 @@ struct rte_flow *
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
+ rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
flow_mreg_del_copy_action(dev, flow);
@@ -9456,7 +9457,7 @@ struct mlx5_flow_workspace*
{
uint32_t pools_n, us;
- pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
+ pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -9558,17 +9559,17 @@ struct mlx5_flow_workspace*
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
age_param = MLX5_CNT_TO_AGE(cnt);
- if (__atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) != AGE_CANDIDATE)
+ if (rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) != AGE_CANDIDATE)
continue;
if (cur->data[i].hits != prev->data[i].hits) {
- __atomic_store_n(&age_param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
continue;
}
- if (__atomic_fetch_add(&age_param->sec_since_last_hit,
+ if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
time_delta,
- __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
+ rte_memory_order_relaxed) + time_delta <= age_param->timeout)
continue;
/**
* Hold the lock first, or if between the
@@ -9579,10 +9580,10 @@ struct mlx5_flow_workspace*
priv = rte_eth_devices[age_param->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_TMOUT, false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_TMOUT,
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
}
@@ -11407,7 +11408,7 @@ struct tunnel_db_element_release_ctx {
{
struct tunnel_db_element_release_ctx *ctx = x;
ctx->ret = 0;
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 0065727..943f759 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1049,7 +1049,7 @@ struct mlx5_flow_tunnel {
LIST_ENTRY(mlx5_flow_tunnel) chain;
struct rte_flow_tunnel app_tunnel; /** app tunnel copy */
uint32_t tunnel_id; /** unique tunnel ID */
- uint32_t refctn;
+ RTE_ATOMIC(uint32_t) refctn;
struct rte_flow_action action;
struct rte_flow_item item;
struct mlx5_hlist *groups; /** tunnel groups */
@@ -1470,7 +1470,7 @@ struct rte_flow_pattern_template {
struct mlx5dr_match_template *mt; /* mlx5 match template. */
uint64_t item_flags; /* Item layer flags. */
uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
/*
* If true, then rule pattern should be prepended with
* represented_port pattern item.
@@ -1502,7 +1502,7 @@ struct rte_flow_actions_template {
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint16_t recom_off; /* Offset of DR IPv6 routing push remove action. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
uint8_t flex_item; /* flex item index. */
};
@@ -1855,7 +1855,7 @@ struct rte_flow_template_table {
/* Shared RSS action structure */
struct mlx5_shared_action_rss {
ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
- uint32_t refcnt; /**< Atomically accessed refcnt. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
struct mlx5_ind_table_obj *ind_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index ab9eb21..a94b228 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -619,7 +619,7 @@
uint8_t *u8addr;
uint8_t hit;
- if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
AGE_CANDIDATE)
continue;
byte = 63 - (j / 8);
@@ -627,13 +627,13 @@
u8addr = (uint8_t *)addr;
hit = (u8addr[byte] >> offset) & 0x1;
if (hit) {
- __atomic_store_n(&ap->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
} else {
struct mlx5_priv *priv;
- __atomic_fetch_add(&ap->sec_since_last_hit,
- diff, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
+ diff, rte_memory_order_relaxed);
/* If timeout passed add to aged-out list. */
if (ap->sec_since_last_hit <= ap->timeout)
continue;
@@ -641,12 +641,11 @@
rte_eth_devices[ap->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&ap->state,
+ if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
&expected,
AGE_TMOUT,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
LIST_INSERT_HEAD(&age_info->aged_aso,
act, next);
MLX5_AGE_SET(age_info,
@@ -946,10 +945,10 @@
for (i = 0; i < n; ++i) {
aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
MLX5_ASSERT(aso_mtr);
- verdict = __atomic_compare_exchange_n(&aso_mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
}
sq->tail += n;
@@ -1005,10 +1004,10 @@
mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,
MLX5_INDIRECT_ACTION_IDX_GET(job->action));
MLX5_ASSERT(mtr);
- verdict = __atomic_compare_exchange_n(&mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
}
@@ -1103,7 +1102,7 @@
struct mlx5_aso_sq *sq;
struct mlx5_dev_ctx_shared *sh = priv->sh;
uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
- uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+ uint8_t state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
poll_cq_t poll_mtr_cq =
is_tmpl_api ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
@@ -1112,7 +1111,7 @@
sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);
do {
poll_mtr_cq(priv, sq);
- if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
ASO_METER_READY)
return 0;
/* Waiting for CQE ready. */
@@ -1411,7 +1410,7 @@
uint16_t wqe_idx;
struct mlx5_aso_ct_pool *pool;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_FREE) {
DRV_LOG(ERR, "Fail: No context to query");
@@ -1620,12 +1619,12 @@
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
else
sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
/* Waiting for CQE ready, consider should block or sleep. */
@@ -1791,7 +1790,7 @@
bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (sh->config.dv_flow_en == 2)
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
@@ -1807,7 +1806,7 @@
}
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_READY ||
state == ASO_CONNTRACK_QUERY)
return 0;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d434c67..f9c56af 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -313,7 +313,7 @@ enum mlx5_l3_tunnel_detection {
}
static inline struct mlx5_hlist *
-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
const char *name, uint32_t size, bool direct_key,
bool lcores_share, void *ctx,
mlx5_list_create_cb cb_create,
@@ -327,7 +327,7 @@ enum mlx5_l3_tunnel_detection {
struct mlx5_hlist *expected = NULL;
char s[MLX5_NAME_SIZE];
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
if (likely(hl))
return hl;
snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
@@ -341,11 +341,11 @@ enum mlx5_l3_tunnel_detection {
"cannot allocate resource memory");
return NULL;
}
- if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_hlist_destroy(hl);
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
}
return hl;
}
@@ -6139,8 +6139,8 @@ struct mlx5_list_entry *
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
- struct mlx5_indexed_pool *ipool = __atomic_load_n
- (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+ struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
+ (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
if (!ipool) {
struct mlx5_indexed_pool *expected = NULL;
@@ -6165,13 +6165,13 @@ struct mlx5_list_entry *
ipool = mlx5_ipool_create(&cfg);
if (!ipool)
return NULL;
- if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
- &expected, ipool, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
+ &expected, ipool,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_ipool_destroy(ipool);
- ipool = __atomic_load_n(&sh->mdh_ipools[index],
- __ATOMIC_SEQ_CST);
+ ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
+ rte_memory_order_seq_cst);
}
}
return ipool;
@@ -6992,9 +6992,9 @@ struct mlx5_list_entry *
age_info = GET_PORT_AGE_INFO(priv);
age_param = flow_dv_counter_idx_get_age(dev, counter);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since counter may still in process.
@@ -7002,7 +7002,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -7038,8 +7038,8 @@ struct mlx5_list_entry *
* indirect action API, shared info is 1 before the reduction,
* so this condition is failed and function doesn't return here.
*/
- if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
- __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
+ rte_memory_order_relaxed) - 1)
return;
}
cnt->pool = pool;
@@ -10203,8 +10203,8 @@ struct mlx5_list_entry *
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
} else {
ret = rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -10243,8 +10243,8 @@ struct mlx5_list_entry *
geneve_opt_resource->option_class = geneve_opt_v->option_class;
geneve_opt_resource->option_type = geneve_opt_v->option_type;
geneve_opt_resource->length = geneve_opt_v->option_len;
- __atomic_store_n(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
}
exit:
rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
@@ -12192,8 +12192,8 @@ struct mlx5_list_entry *
(void *)(uintptr_t)(dev_flow->flow_idx);
age_param->timeout = age->timeout;
age_param->port_id = dev->data->port_id;
- __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
return counter;
}
@@ -13241,9 +13241,9 @@ struct mlx5_list_entry *
uint16_t expected = AGE_CANDIDATE;
age_info = GET_PORT_AGE_INFO(priv);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since age action may still in process.
@@ -13251,7 +13251,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
LIST_REMOVE(age, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -13275,7 +13275,7 @@ struct mlx5_list_entry *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
- uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
flow_dv_aso_age_remove_from_age(dev, age);
@@ -13451,7 +13451,7 @@ struct mlx5_list_entry *
return 0; /* 0 is an error. */
}
}
- __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
return pool->index | ((age_free->offset + 1) << 16);
}
@@ -13481,10 +13481,10 @@ struct mlx5_list_entry *
aso_age->age_params.context = context;
aso_age->age_params.timeout = timeout;
aso_age->age_params.port_id = dev->data->port_id;
- __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
- __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
static void
@@ -13666,12 +13666,12 @@ struct mlx5_list_entry *
uint32_t ret;
struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
/* Cannot release when CT is in the ASO SQ. */
if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
return -1;
- ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -13861,7 +13861,7 @@ struct mlx5_list_entry *
pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
/* 0: inactive, 1: created, 2+: used by flows. */
- __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -14813,8 +14813,8 @@ struct mlx5_list_entry *
age_act = flow_aso_age_get_by_idx(dev, owner_idx);
if (flow->age == 0) {
flow->age = owner_idx;
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
+ rte_memory_order_relaxed);
}
age_act_pos = actions_n++;
action_flags |= MLX5_FLOW_ACTION_AGE;
@@ -14851,9 +14851,9 @@ struct mlx5_list_entry *
} else {
if (flow->counter == 0) {
flow->counter = owner_idx;
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&cnt_act->shared_info.refcnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
}
/* Save information first, will apply later. */
action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -15185,8 +15185,8 @@ struct mlx5_list_entry *
flow->indirect_type =
MLX5_INDIRECT_ACTION_TYPE_CT;
flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
}
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
@@ -15855,7 +15855,7 @@ struct mlx5_list_entry *
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
- __atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
}
void
@@ -16038,8 +16038,8 @@ struct mlx5_list_entry *
sh->geneve_tlv_option_resource;
rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
if (geneve_opt_resource) {
- if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED) - 1)) {
+ if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
claim_zero(mlx5_devx_cmd_destroy
(geneve_opt_resource->obj));
mlx5_free(sh->geneve_tlv_option_resource);
@@ -16448,7 +16448,7 @@ struct mlx5_list_entry *
/* Update queue with indirect table queue memory. */
origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
- __atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
@@ -16494,9 +16494,9 @@ struct mlx5_list_entry *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"invalid shared action");
- if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
- 0, 0, __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
+ 0, rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -16632,10 +16632,10 @@ struct rte_flow_action_handle *
return __flow_dv_action_rss_release(dev, idx, error);
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
- if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
- &no_flow_refcnt, 1, false,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
+ &no_flow_refcnt, 1,
+ rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -17595,13 +17595,13 @@ struct rte_flow_action_handle *
case MLX5_INDIRECT_ACTION_TYPE_AGE:
age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
resp = data;
- resp->aged = __atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) == AGE_TMOUT ?
+ resp->aged = rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) == AGE_TMOUT ?
1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_dv_query_count(dev, idx, data, error);
@@ -17678,12 +17678,12 @@ struct rte_flow_action_handle *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
}
- resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
AGE_TMOUT ? 1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
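The flow_dv_hlist_prepare() and flow_dv_modify_ipool_get() hunks above keep the existing create-then-publish idiom and only respell the atomics: load the slot, create a candidate if empty, try to install it with a seq_cst CAS, and fall back to the winner's object if the CAS loses. A condensed sketch of the same pattern using placeholder names (my_table, table_create and table_destroy are illustrative, not driver symbols):

#include <stddef.h>
#include <rte_stdatomic.h>

struct my_table;                        /* placeholder type */
struct my_table *table_create(void);    /* assumed helpers */
void table_destroy(struct my_table *t);

static struct my_table *
table_get_or_create(RTE_ATOMIC(struct my_table *) *slot)
{
        struct my_table *expected = NULL;
        struct my_table *t;

        t = rte_atomic_load_explicit(slot, rte_memory_order_seq_cst);
        if (t != NULL)
                return t;
        t = table_create();
        if (t == NULL)
                return NULL;
        /* Publish; if another thread won the race, drop ours and use theirs. */
        if (!rte_atomic_compare_exchange_strong_explicit(slot, &expected, t,
                        rte_memory_order_seq_cst, rte_memory_order_seq_cst)) {
                table_destroy(t);
                t = rte_atomic_load_explicit(slot, rte_memory_order_seq_cst);
        }
        return t;
}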
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 4ae03a2..8a02247 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -86,7 +86,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map |= 1u << idx;
}
}
@@ -107,7 +107,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map &= ~(1u << idx);
rte_spinlock_unlock(&priv->flex_item_sl);
}
@@ -379,7 +379,7 @@
return ret;
}
if (acquire)
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return ret;
}
@@ -414,7 +414,7 @@
rte_errno = -EINVAL;
return -EINVAL;
}
- __atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
return 0;
}
@@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *
}
flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
/* Mark initialized flex item valid. */
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return (struct rte_flow_item_flex_handle *)flex;
error:
@@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
"invalid flex item handle value");
}
- if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
+ rte_memory_order_acquire, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&priv->flex_item_sl);
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9ebbe66..8891f3c 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -715,7 +715,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
}
if (acts->mark)
- if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed) - 1))
flow_hw_rxq_flag_set(dev, false);
if (acts->jump) {
@@ -2298,7 +2299,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
goto err;
acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
- __atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
@@ -4537,8 +4539,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
uint8_t i;
for (i = 0; i < nb_action_templates; i++) {
- uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED);
+ uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (refcnt <= 1) {
rte_flow_error_set(error, EINVAL,
@@ -4576,8 +4578,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
while (i--) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_sub_fetch(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
return rte_errno;
}
@@ -4748,8 +4750,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
- ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED) + 1;
+ ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (ret <= 1) {
rte_errno = EINVAL;
goto it_error;
@@ -4800,14 +4802,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
for (i = 0; i < nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_fetch_sub(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
i = nb_item_templates;
it_error:
while (i--)
- __atomic_fetch_sub(&item_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
error:
err = rte_errno;
if (tbl) {
@@ -5039,12 +5041,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
LIST_REMOVE(table, next);
for (i = 0; i < table->nb_item_templates; i++)
- __atomic_fetch_sub(&table->its[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
+ 1, rte_memory_order_relaxed);
for (i = 0; i < table->nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
- __atomic_fetch_sub(&table->ats[i].action_template->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
+ 1, rte_memory_order_relaxed);
}
flow_hw_destroy_table_multi_pattern_ctx(table);
if (table->matcher_info[0].matcher)
@@ -7287,7 +7289,7 @@ enum mlx5_hw_indirect_list_relative_position {
if (!at->tmpl)
goto error;
at->action_flags = action_flags;
- __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
return at;
error:
@@ -7323,7 +7325,7 @@ enum mlx5_hw_indirect_list_relative_position {
uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Action template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -7897,7 +7899,7 @@ enum mlx5_hw_indirect_list_relative_position {
break;
}
}
- __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
rte_errno = pattern_template_validate(dev, &it, 1);
if (rte_errno)
goto error;
@@ -7933,7 +7935,7 @@ enum mlx5_hw_indirect_list_relative_position {
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Item template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -10513,7 +10515,8 @@ struct mlx5_list_entry *
}
dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
priv->shared_host = host_dev;
- __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
}
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
@@ -10698,7 +10701,8 @@ struct mlx5_list_entry *
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
if (priv->hw_q) {
@@ -10814,7 +10818,8 @@ struct mlx5_list_entry *
priv->hw_q = NULL;
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
mlx5_free(priv->hw_attr);
@@ -10872,8 +10877,8 @@ struct mlx5_list_entry *
NULL,
"Invalid CT destruction index");
}
- __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
+ rte_memory_order_relaxed);
mlx5_ipool_free(pool->cts, idx);
return 0;
}
@@ -11572,7 +11577,7 @@ struct mlx5_hw_q_job *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
- switch (__atomic_load_n(¶m->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(¶m->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_REPORTED:
case HWS_AGE_AGED_OUT_NOT_REPORTED:
resp->aged = 1;
@@ -11592,8 +11597,8 @@ struct mlx5_hw_q_job *
}
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (¶m->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (¶m->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
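One subtlety in the template refcount hunks above: __atomic_add_fetch()/__atomic_sub_fetch() return the new value, while rte_atomic_fetch_add_explicit()/rte_atomic_fetch_sub_explicit() return the previous one, so the conversion adds an explicit '+ 1' (or keeps a '- 1') wherever the new value is actually used. A small illustrative sketch with a hypothetical counter:

#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) refcnt;

static uint32_t
refcnt_bump(void)
{
        /* __atomic_add_fetch(&refcnt, 1, ...) returned the incremented value;
         * the fetch-add form returns the old value, so add 1 to keep semantics.
         */
        return rte_atomic_fetch_add_explicit(&refcnt, 1,
                        rte_memory_order_relaxed) + 1;
}

static uint32_t
refcnt_drop(void)
{
        /* Old value minus 1 is the new count after the decrement. */
        return rte_atomic_fetch_sub_explicit(&refcnt, 1,
                        rte_memory_order_relaxed) - 1;
}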
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index ca361f7..da3289b 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -2055,9 +2055,9 @@ struct mlx5_flow_meter_policy *
NULL, "Meter profile id not valid.");
/* Meter policy must exist. */
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
if (!priv->sh->config.dv_esw_en)
domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -2137,7 +2137,7 @@ struct mlx5_flow_meter_policy *
fm->is_enable = params->meter_enable;
fm->shared = !!shared;
fm->color_aware = !!params->use_prev_mtr_color;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
fm->def_policy = 1;
fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
@@ -2166,7 +2166,7 @@ struct mlx5_flow_meter_policy *
}
fm->active_state = params->meter_enable;
if (mtr_policy)
- __atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
error:
mlx5_flow_destroy_mtr_tbls(dev, fm);
@@ -2271,8 +2271,8 @@ struct mlx5_flow_meter_policy *
NULL, "Failed to create devx meter.");
}
fm->active_state = params->meter_enable;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
- __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
}
#endif
@@ -2295,7 +2295,7 @@ struct mlx5_flow_meter_policy *
if (fmp == NULL)
return -1;
/* Update dependencies. */
- __atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
fm->profile = NULL;
/* Remove from list. */
if (!priv->sh->meter_aso_en) {
@@ -2313,15 +2313,15 @@ struct mlx5_flow_meter_policy *
}
mlx5_flow_destroy_mtr_tbls(dev, fm);
if (fm->def_policy)
- __atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
+ 1, rte_memory_order_relaxed);
if (priv->sh->meter_aso_en) {
if (!fm->def_policy) {
mtr_policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
if (mtr_policy)
- __atomic_fetch_sub(&mtr_policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
fm->policy_id = 0;
}
fm->def_policy = 0;
@@ -2424,13 +2424,13 @@ struct mlx5_flow_meter_policy *
RTE_MTR_ERROR_TYPE_UNSPECIFIED,
NULL, "Meter object is being used.");
/* Destroy the meter profile. */
- __atomic_fetch_sub(&fm->profile->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
+ 1, rte_memory_order_relaxed);
/* Destroy the meter policy. */
policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
- __atomic_fetch_sub(&policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
index 14a2a8b..6ad0e8a 100644
--- a/drivers/net/mlx5/mlx5_flow_quota.c
+++ b/drivers/net/mlx5/mlx5_flow_quota.c
@@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
struct mlx5_quota *quota_obj =
sq->elts[(sq->tail + i) & mask].quota_obj;
- __atomic_compare_exchange_n("a_obj->state, &state,
- MLX5_QUOTA_STATE_READY, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit("a_obj->state, &state,
+ MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
}
}
@@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
rte_spinlock_lock(&sq->sqsl);
mlx5_quota_cmd_completion_handle(sq);
rte_spinlock_unlock(&sq->sqsl);
- if (__atomic_load_n("a_obj->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit("a_obj->state, rte_memory_order_relaxed) ==
MLX5_QUOTA_STATE_READY)
return 0;
} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
{
uint8_t state = MLX5_QUOTA_STATE_READY;
- bool verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ bool verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict)
return rte_flow_error_set(error, EBUSY,
@@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
async_job ? async_job : &sync_job, push, NULL);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
async_job ? async_job : &sync_job, push,
(void *)(uintptr_t)update->conf);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -593,9 +593,9 @@ struct rte_flow_action_handle *
NULL, "quota: failed to allocate quota object");
return NULL;
}
- verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: new quota object has invalid state");
@@ -616,8 +616,8 @@ struct rte_flow_action_handle *
(void *)(uintptr_t)conf);
if (ret) {
mlx5_ipool_free(qctx->quota_ipool, id);
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
+ rte_memory_order_relaxed);
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: WR failure");
return 0;
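The quota hunks above preserve the existing state guard: an object is claimed by moving READY to WAIT with a relaxed CAS before a WQE is posted, and the state is stored back to READY if posting fails. A condensed sketch of that guard with invented state names and a hypothetical object type:

#include <stdint.h>
#include <stdbool.h>
#include <rte_stdatomic.h>

enum { OBJ_STATE_FREE, OBJ_STATE_READY, OBJ_STATE_WAIT }; /* illustrative */

struct my_obj {
        RTE_ATOMIC(uint8_t) state;
};

static bool
obj_begin_cmd(struct my_obj *obj)
{
        uint8_t expected = OBJ_STATE_READY;

        /* Only one thread may move READY -> WAIT and own the command slot. */
        return rte_atomic_compare_exchange_strong_explicit(&obj->state,
                        &expected, OBJ_STATE_WAIT,
                        rte_memory_order_relaxed, rte_memory_order_relaxed);
}

static void
obj_abort_cmd(struct my_obj *obj)
{
        /* Posting failed: hand the object back by restoring READY. */
        rte_atomic_store_explicit(&obj->state, OBJ_STATE_READY,
                        rte_memory_order_relaxed);
}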
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index c31f2f3..1b625e0 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -149,7 +149,7 @@
}
if (param->timeout == 0)
continue;
- switch (__atomic_load_n(¶m->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(¶m->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_NOT_REPORTED:
case HWS_AGE_AGED_OUT_REPORTED:
/* Already aged-out, no action is needed. */
@@ -171,8 +171,8 @@
hits = rte_be_to_cpu_64(stats[i].hits);
if (param->nb_cnts == 1) {
if (hits != param->accumulator_last_hits) {
- __atomic_store_n(¶m->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(¶m->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
param->accumulator_last_hits = hits;
continue;
}
@@ -184,8 +184,8 @@
param->accumulator_cnt = 0;
if (param->accumulator_last_hits !=
param->accumulator_hits) {
- __atomic_store_n(¶m->sec_since_last_hit,
- 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(¶m->sec_since_last_hit,
+ 0, rte_memory_order_relaxed);
param->accumulator_last_hits =
param->accumulator_hits;
param->accumulator_hits = 0;
@@ -193,9 +193,9 @@
}
param->accumulator_hits = 0;
}
- if (__atomic_fetch_add(¶m->sec_since_last_hit, time_delta,
- __ATOMIC_RELAXED) + time_delta <=
- __atomic_load_n(¶m->timeout, __ATOMIC_RELAXED))
+ if (rte_atomic_fetch_add_explicit(¶m->sec_since_last_hit, time_delta,
+ rte_memory_order_relaxed) + time_delta <=
+ rte_atomic_load_explicit(¶m->timeout, rte_memory_order_relaxed))
continue;
/* Prepare the relevant ring for this AGE parameter */
if (priv->hws_strict_queue)
@@ -203,10 +203,10 @@
else
r = age_info->hw_age.aged_list;
/* Changing the state atomically and insert it into the ring. */
- if (__atomic_compare_exchange_n(¶m->state, &expected1,
+ if (rte_atomic_compare_exchange_strong_explicit(¶m->state, &expected1,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
sizeof(uint32_t),
1, NULL);
@@ -221,11 +221,10 @@
*/
expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
if (ret == 0 &&
- !__atomic_compare_exchange_n(¶m->state,
+ !rte_atomic_compare_exchange_strong_explicit(¶m->state,
&expected2, expected1,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected2 == HWS_AGE_FREE)
mlx5_hws_age_param_free(priv,
param->own_cnt_index,
@@ -235,10 +234,10 @@
if (!priv->hws_strict_queue)
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
} else {
- __atomic_compare_exchange_n(¶m->state, &expected2,
+ rte_atomic_compare_exchange_strong_explicit(¶m->state, &expected2,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
}
}
/* The event is irrelevant in strict queue mode. */
@@ -796,8 +795,8 @@ struct mlx5_hws_cnt_pool *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
- switch (__atomic_exchange_n(¶m->state, HWS_AGE_FREE,
- __ATOMIC_RELAXED)) {
+ switch (rte_atomic_exchange_explicit(¶m->state, HWS_AGE_FREE,
+ rte_memory_order_relaxed)) {
case HWS_AGE_CANDIDATE:
case HWS_AGE_AGED_OUT_REPORTED:
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
@@ -862,8 +861,8 @@ struct mlx5_hws_cnt_pool *
"cannot allocate AGE parameter");
return 0;
}
- MLX5_ASSERT(__atomic_load_n(¶m->state,
- __ATOMIC_RELAXED) == HWS_AGE_FREE);
+ MLX5_ASSERT(rte_atomic_load_explicit(¶m->state,
+ rte_memory_order_relaxed) == HWS_AGE_FREE);
if (shared) {
param->nb_cnts = 0;
param->accumulator_hits = 0;
@@ -914,9 +913,9 @@ struct mlx5_hws_cnt_pool *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
if (update_ade->timeout_valid) {
- uint32_t old_timeout = __atomic_exchange_n(¶m->timeout,
+ uint32_t old_timeout = rte_atomic_exchange_explicit(¶m->timeout,
update_ade->timeout,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (old_timeout == 0)
sec_since_last_hit_reset = true;
@@ -935,8 +934,8 @@ struct mlx5_hws_cnt_pool *
state_update = true;
}
if (sec_since_last_hit_reset)
- __atomic_store_n(¶m->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(¶m->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
if (state_update) {
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
@@ -945,13 +944,13 @@ struct mlx5_hws_cnt_pool *
* - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
* - AGED_OUT_REPORTED -> CANDIDATE
*/
- if (!__atomic_compare_exchange_n(¶m->state, &expected,
+ if (!rte_atomic_compare_exchange_strong_explicit(¶m->state, &expected,
HWS_AGE_CANDIDATE_INSIDE_RING,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected == HWS_AGE_AGED_OUT_REPORTED)
- __atomic_store_n(¶m->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(¶m->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
return 0;
}
@@ -976,9 +975,9 @@ struct mlx5_hws_cnt_pool *
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
MLX5_ASSERT(param != NULL);
- if (__atomic_compare_exchange_n(¶m->state, &expected,
- HWS_AGE_AGED_OUT_REPORTED, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(¶m->state, &expected,
+ HWS_AGE_AGED_OUT_REPORTED,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
return param->context;
switch (expected) {
case HWS_AGE_FREE:
@@ -990,8 +989,8 @@ struct mlx5_hws_cnt_pool *
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
break;
case HWS_AGE_CANDIDATE_INSIDE_RING:
- __atomic_store_n(¶m->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(¶m->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
break;
case HWS_AGE_CANDIDATE:
/*
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index e005960..481442f 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -101,7 +101,7 @@ struct mlx5_hws_cnt_pool {
LIST_ENTRY(mlx5_hws_cnt_pool) next;
struct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;
struct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;
- uint32_t query_gen __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) query_gen __rte_cache_aligned;
struct mlx5_hws_cnt *pool;
struct mlx5_hws_cnt_raw_data_mng *raw_mng;
struct rte_ring *reuse_list;
@@ -134,10 +134,10 @@ enum {
/* HWS counter age parameter. */
struct mlx5_hws_age_param {
- uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/* Time in seconds since last hit (atomically accessed). */
- uint16_t state; /* AGE state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
uint64_t accumulator_last_hits;
/* Last total value of hits for comparing. */
uint64_t accumulator_hits;
@@ -426,7 +426,7 @@ struct mlx5_hws_age_param {
iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
hpool->pool[iidx].in_used = false;
hpool->pool[iidx].query_gen_when_free =
- __atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
qcache = hpool->cache->qcache[*queue];
if (unlikely(qcache == NULL)) {
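The mlx5_hws_cnt.h changes above are the other half of the conversion: any field that is accessed through rte_atomic_*_explicit() is declared with the RTE_ATOMIC() specifier, which is expected to expand to C11 _Atomic when DPDK is built with standard atomics enabled and to a plain type otherwise. A minimal sketch with invented names:

#include <stdint.h>
#include <rte_stdatomic.h>

struct my_counter_pool {
        uint32_t n_counters;             /* plain field, no annotation needed */
        RTE_ATOMIC(uint32_t) query_gen;  /* accessed with relaxed atomics */
};

static uint32_t
pool_query_gen(struct my_counter_pool *p)
{
        return rte_atomic_load_explicit(&p->query_gen,
                        rte_memory_order_relaxed);
}

static void
pool_bump_query_gen(struct my_counter_pool *p)
{
        rte_atomic_fetch_add_explicit(&p->query_gen, 1,
                        rte_memory_order_relaxed);
}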
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 2fce908..c627113 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {
/* RX queue private data. */
struct mlx5_rxq_priv {
uint16_t idx; /* Queue index. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -188,7 +188,7 @@ struct mlx5_rxq_priv {
/* External RX queue descriptor. */
struct mlx5_external_rxq {
uint32_t hw_id; /* Queue index in the Hardware. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
/* mlx5_rxq.c */
@@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
void *addr;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
MLX5_ASSERT(rep != NULL);
/* Replace MPRQ buf. */
(*rxq->mprq_bufs)[rq_idx] = rep;
@@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
void *buf_addr;
/* Increment the refcnt of the whole chunk. */
- __atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
- MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
- __ATOMIC_RELAXED) <= strd_n + 1);
+ rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
+ MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
+ rte_memory_order_relaxed) <= strd_n + 1);
buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
/*
* MLX5 device doesn't use iova but it is necessary in a
@@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
return false;
rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
- return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+ return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
}
#define LWM_COOKIE_RXQID_OFFSET 0
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dd51687..f67aaa6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -416,7 +416,7 @@
rte_errno = EINVAL;
return -rte_errno;
}
- return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1319,7 +1319,7 @@
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
for (j = 0; j != strd_n; ++j) {
shinfo = &buf->shinfos[j];
shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2037,7 +2037,7 @@ struct mlx5_rxq_priv *
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
if (rxq != NULL)
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2059,7 +2059,7 @@ struct mlx5_rxq_priv *
if (rxq == NULL)
return 0;
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2138,7 +2138,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2158,7 +2158,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2447,8 +2447,8 @@ struct mlx5_ind_table_obj *
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
== 0)) {
- __atomic_fetch_add(&ind_tbl->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
+ rte_memory_order_relaxed);
break;
}
}
@@ -2479,7 +2479,7 @@ struct mlx5_ind_table_obj *
unsigned int ret;
rte_rwlock_write_lock(&priv->ind_tbls_lock);
- ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
@@ -2561,7 +2561,7 @@ struct mlx5_ind_table_obj *
}
return ret;
}
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
return 0;
}
@@ -2626,7 +2626,7 @@ struct mlx5_ind_table_obj *
{
uint32_t refcnt;
- refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+ refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
if (refcnt <= 1)
return 0;
/*
@@ -3258,8 +3258,8 @@ struct mlx5_hrxq *
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
if (ext_rxq == NULL)
return -rte_errno;
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
if (ext_rxq->hw_id != hw_idx) {
DRV_LOG(ERR, "Port %u external RxQ index %u "
"is already mapped to HW index (requesting is "
@@ -3296,8 +3296,8 @@ struct mlx5_hrxq *
rte_errno = EINVAL;
return -rte_errno;
}
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
port_id, dpdk_idx);
rte_errno = EINVAL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f8d6728..c241a1d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1441,7 +1441,7 @@
rte_delay_us_sleep(1000 * priv->rxqs_n);
DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
if (priv->sh->config.dv_flow_en == 2) {
- if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
flow_hw_rxq_flag_set(dev, false);
} else {
mlx5_flow_stop_default(dev);
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index b1e8ea1..0e44df5 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -179,7 +179,7 @@ struct mlx5_txq_data {
__extension__
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
bool is_hairpin; /* Whether TxQ type is Hairpin. */
unsigned int max_inline_data; /* Max inline data. */
@@ -339,8 +339,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
* the service thread, data should be re-read.
*/
rte_compiler_barrier();
- ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
- ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
+ ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
+ ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
break;
@@ -350,8 +350,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts -= ts;
if (unlikely(mts >= UINT64_MAX / 2)) {
/* We have negative integer, mts is in the past. */
- __atomic_fetch_add(&sh->txpp.err_ts_past,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
+ 1, rte_memory_order_relaxed);
return -1;
}
tick = sh->txpp.tick;
@@ -360,8 +360,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts = (mts + tick - 1) / tick;
if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
/* We have mts is too distant future. */
- __atomic_fetch_add(&sh->txpp.err_ts_future,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
+ 1, rte_memory_order_relaxed);
return -1;
}
mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
@@ -1743,8 +1743,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
/* Convert the timestamp into completion to wait. */
ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
if (txq->ts_last && ts < txq->ts_last)
- __atomic_fetch_add(&txq->sh->txpp.err_ts_order,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
+ 1, rte_memory_order_relaxed);
txq->ts_last = ts;
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
sh = txq->sh;
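The ci_ts/ts pair read in mlx5_tx.h above keeps its original shape: two relaxed loads bracketed by compiler barriers, retried until both halves clearly belong to the same writer update (the real code compares CQ index bits). A simplified seqlock-style sketch of that double-read idiom, with placeholder fields and a simpler retry check:

#include <stdint.h>
#include <rte_atomic.h>       /* rte_compiler_barrier() */
#include <rte_stdatomic.h>

struct ts_pair {
        RTE_ATOMIC(uint64_t) ci_ts;
        RTE_ATOMIC(uint64_t) ts;
};

static void
ts_pair_read(struct ts_pair *p, uint64_t *ci, uint64_t *ts)
{
        for (;;) {
                rte_compiler_barrier();
                *ci = rte_atomic_load_explicit(&p->ci_ts,
                                rte_memory_order_relaxed);
                *ts = rte_atomic_load_explicit(&p->ts,
                                rte_memory_order_relaxed);
                rte_compiler_barrier();
                /* Retry if ci_ts changed while the pair was being read;
                 * this stands in for the driver's CQ-index consistency check.
                 */
                if (*ci == rte_atomic_load_explicit(&p->ci_ts,
                                rte_memory_order_relaxed))
                        return;
        }
}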
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 5a5df2d..4e26fa2 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -538,12 +538,12 @@
uint64_t *ps;
rte_compiler_barrier();
- tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
- op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
+ tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
+ op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
rte_compiler_barrier();
- if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
+ if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
continue;
- if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
+ if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
continue;
ps = (uint64_t *)ts;
ps[0] = tm;
@@ -561,8 +561,8 @@
ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
rte_compiler_barrier();
- __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
rte_wmb();
}
@@ -590,8 +590,8 @@
*/
DRV_LOG(DEBUG,
"Clock Queue error sync lost (%X).", opcode);
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
return;
@@ -633,10 +633,10 @@
if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
return;
MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
- sh->txpp.ts.ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
- sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+ sh->txpp.ts.ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+ sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
sh->txpp.ts_p = 0;
if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -677,8 +677,8 @@
/* Check whether we have missed interrupts. */
if (cq_ci - wq->cq_ci != 1) {
DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
- __atomic_fetch_add(&sh->txpp.err_miss_int,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
+ 1, rte_memory_order_relaxed);
/* Check sync lost on wqe index. */
if (cq_ci - wq->cq_ci >=
(((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -693,8 +693,8 @@
/* Fire new requests to Rearm Queue. */
if (error) {
DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_rearm_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
}
@@ -987,8 +987,8 @@
mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
if (to.cts.op_own >> 4) {
DRV_LOG(DEBUG, "Clock Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
return -EIO;
}
@@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
return 0;
}
@@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
do {
uint64_t ts, ci;
- ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
- ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+ ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
+ ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ts,
- __ATOMIC_RELAXED) != ts)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
+ rte_memory_order_relaxed) != ts)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
- __ATOMIC_RELAXED) != ci)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
+ rte_memory_order_relaxed) != ci)
continue;
tsa->ts = ts;
tsa->ci_ts = ci;
@@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
for (i = 0; i < n_txpp; ++i)
stats[n_used + i].id = n_used + i;
stats[n_used + 0].value =
- __atomic_load_n(&sh->txpp.err_miss_int,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_miss_int,
+ rte_memory_order_relaxed);
stats[n_used + 1].value =
- __atomic_load_n(&sh->txpp.err_rearm_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
+ rte_memory_order_relaxed);
stats[n_used + 2].value =
- __atomic_load_n(&sh->txpp.err_clock_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
+ rte_memory_order_relaxed);
stats[n_used + 3].value =
- __atomic_load_n(&sh->txpp.err_ts_past,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_past,
+ rte_memory_order_relaxed);
stats[n_used + 4].value =
- __atomic_load_n(&sh->txpp.err_ts_future,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_future,
+ rte_memory_order_relaxed);
stats[n_used + 5].value =
- __atomic_load_n(&sh->txpp.err_ts_order,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_order,
+ rte_memory_order_relaxed);
stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
stats[n_used + 8].value = sh->txpp.sync_lost;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 14f55e8..da4236f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *
rte_errno = ENOMEM;
goto error;
}
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
tmpl->is_hairpin = false;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
@@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->is_hairpin = true;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
- __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
}
return ctrl;
}
@@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *
if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
return 0;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
+ if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
return 1;
if (txq_ctrl->obj) {
priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *
txq_free_elts(txq_ctrl);
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
- if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
if (!txq_ctrl->is_hairpin)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq_ctrl, next);
@@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *
if (!(*priv->txqs)[idx])
return -1;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
}
/**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index e28db2e..fc03cc0 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -203,7 +203,7 @@ struct mlx5_indexed_pool *
struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
lc = pool->cache[cidx]->lc;
- gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
+ gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
if (gc && lc != gc) {
mlx5_ipool_lock(pool);
if (lc && !(--lc->ref_cnt))
@@ -266,8 +266,8 @@ struct mlx5_indexed_pool *
pool->cache[cidx]->len = fetch_size - 1;
return pool->cache[cidx]->idx[pool->cache[cidx]->len];
}
- trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
- __ATOMIC_ACQUIRE) : 0;
+ trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
+ rte_memory_order_acquire) : 0;
trunk_n = lc ? lc->n_trunk : 0;
cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
/* Check if index reach maximum. */
@@ -332,11 +332,11 @@ struct mlx5_indexed_pool *
lc = p;
lc->ref_cnt = 1;
pool->cache[cidx]->lc = lc;
- __atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
}
/* Add trunk to trunks array. */
lc->trunks[trunk_idx] = trunk;
- __atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
/* Enqueue half of the index to global. */
ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
fetch_size = trunk->free >> 1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index f3c0d76..3146092 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {
struct mlx5_indexed_cache {
struct mlx5_indexed_trunk **trunks;
- volatile uint32_t n_trunk_valid; /* Trunks allocated. */
+ volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
uint32_t n_trunk; /* Trunk pointer array size. */
uint32_t ref_cnt;
uint32_t len;
@@ -266,7 +266,7 @@ struct mlx5_indexed_pool {
uint32_t free_list; /* Index to first free trunk. */
};
struct {
- struct mlx5_indexed_cache *gc;
+ RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
/* Global cache. */
struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
/* Local cache. */
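As in mlx5_utils.h above, pointer-typed shared fields take the RTE_ATOMIC() specifier around the whole pointer type; the loads stay lock-free while the publishing store in mlx5_utils.c remains ordered by the ipool lock. A brief sketch with placeholder types:

#include <rte_stdatomic.h>

struct my_cache;                        /* placeholder type */

struct my_pool {
        /* The specifier wraps the full pointer type, as with 'gc' above. */
        RTE_ATOMIC(struct my_cache *) gc;
};

static struct my_cache *
pool_cache_get(struct my_pool *p)
{
        return rte_atomic_load_explicit(&p->gc, rte_memory_order_relaxed);
}

static void
pool_cache_publish(struct my_pool *p, struct my_cache *c)
{
        /* Relaxed is enough here when, as in the driver, the caller already
         * holds a lock that orders the publication.
         */
        rte_atomic_store_explicit(&p->gc, c, rte_memory_order_relaxed);
}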
--
1.8.3.1
* [PATCH v3 02/45] net/ixgbe: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 01/45] net/mlx5: use rte " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 03/45] net/iavf: " Tyler Retzlaff
` (43 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
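As a quick orientation, a minimal sketch of the flag-style conversion this
patch performs is shown below. The worker_running flag and the two helper
functions are hypothetical, not ixgbe code; the rte_atomic_* calls and the
RTE_ATOMIC() marker are the ones used in the diff. The old
__atomic_test_and_set/__atomic_clear pair becomes an exchange plus a store.

#include <stdbool.h>
#include <rte_stdatomic.h>

/* Hypothetical single-owner flag, mirroring link_thread_running. */
static RTE_ATOMIC(bool) worker_running;

static bool
try_start_worker(void)
{
        /* was: !__atomic_test_and_set(&worker_running, __ATOMIC_SEQ_CST)
         * exchange returns the previous value; false means we won the flag.
         */
        return !rte_atomic_exchange_explicit(&worker_running, 1,
                                             rte_memory_order_seq_cst);
}

static void
stop_worker(void)
{
        /* was: __atomic_clear(&worker_running, __ATOMIC_SEQ_CST) */
        rte_atomic_store_explicit(&worker_running, 0,
                                  rte_memory_order_seq_cst);
}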
---
drivers/net/ixgbe/ixgbe_ethdev.c | 14 ++++++++------
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 ++--
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c61c52b..e63ae1a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1130,7 +1130,7 @@ struct rte_ixgbe_xstats_name_off {
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbe_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1638,7 +1638,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbevf_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
@@ -4203,7 +4203,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
/* NOTE: review for potential ordering optimization */
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -4240,7 +4240,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -4336,7 +4336,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
ixgbe_dev_wait_setup_link_complete(dev, 0);
/* NOTE: review for potential ordering optimization */
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the IXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -4348,7 +4349,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR,
"Create link thread failed!");
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 22fc3be..8ad841e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -511,7 +511,7 @@ struct ixgbe_adapter {
*/
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
- bool link_thread_running;
+ RTE_ATOMIC(bool) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f6c17d4..e7dfd6f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1831,7 +1831,7 @@
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
@@ -2114,7 +2114,7 @@
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
--
1.8.3.1
* [PATCH v3 03/45] net/iavf: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 02/45] net/ixgbe: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 04/45] net/ice: " Tyler Retzlaff
` (42 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
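The signature change worth calling out in this patch is the strong CAS: the
gcc builtin took the desired value by pointer, while
rte_atomic_compare_exchange_strong_explicit takes it by value. A minimal
sketch of that pattern follows, with a hypothetical uint32_t command slot
standing in for the driver's enum virtchnl_ops field.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Hypothetical pending-command slot; the driver uses enum virtchnl_ops. */
static RTE_ATOMIC(uint32_t) pend_cmd;   /* 0 == nothing pending */

static int
claim_cmd_slot(uint32_t ops)
{
        uint32_t expected = 0;

        /* was: __atomic_compare_exchange(&pend_cmd, &expected, &ops, 0,
         *                                __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
         * note: desired is now passed by value, not by pointer.
         */
        return rte_atomic_compare_exchange_strong_explicit(&pend_cmd,
                        &expected, ops,
                        rte_memory_order_acquire, rte_memory_order_acquire);
}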
---
drivers/net/iavf/iavf.h | 16 ++++++++--------
drivers/net/iavf/iavf_rxtx.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +++++++-------
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 824ae4a..6b977e5 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -238,8 +238,8 @@ struct iavf_info {
struct virtchnl_vlan_caps vlan_v2_caps;
uint64_t supported_rxdid;
uint8_t *proto_xtr; /* proto xtr type for all queues */
- volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
- uint32_t pend_cmd_count;
+ volatile RTE_ATOMIC(enum virtchnl_ops) pend_cmd; /* pending command not finished */
+ RTE_ATOMIC(uint32_t) pend_cmd_count;
int cmd_retval; /* return value of the cmd response from PF */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
@@ -456,13 +456,13 @@ struct iavf_cmd_info {
_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 1, rte_memory_order_relaxed);
return !ret;
}
@@ -472,13 +472,13 @@ struct iavf_cmd_info {
_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 2, rte_memory_order_relaxed);
return !ret;
}
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 0a5246d..d1d4e9f 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2025,7 +2025,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
@@ -2152,7 +2152,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
index 83825aa..20b656e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_neon.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -273,7 +273,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1111d30..6d5969f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -41,7 +41,7 @@ struct iavf_event_element {
};
struct iavf_event_handler {
- uint32_t ndev;
+ RTE_ATOMIC(uint32_t) ndev;
rte_thread_t tid;
int fd[2];
pthread_mutex_t lock;
@@ -129,7 +129,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_add(&handler->ndev, 1, __ATOMIC_RELAXED) + 1 != 1)
+ if (rte_atomic_fetch_add_explicit(&handler->ndev, 1, rte_memory_order_relaxed) + 1 != 1)
return 0;
#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
@@ -137,7 +137,7 @@ struct iavf_event_handler {
int err = pipe(handler->fd);
#endif
if (err != 0) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -146,7 +146,7 @@ struct iavf_event_handler {
if (rte_thread_create_internal_control(&handler->tid, "iavf-event",
iavf_dev_event_handle, NULL)) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -158,7 +158,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed) - 1 != 0)
return;
int unused = pthread_cancel((pthread_t)handler->tid.opaque_id);
@@ -574,8 +574,8 @@ struct iavf_event_handler {
/* read message and it's expected one */
if (msg_opc == vf->pend_cmd) {
uint32_t cmd_count =
- __atomic_fetch_sub(&vf->pend_cmd_count,
- 1, __ATOMIC_RELAXED) - 1;
+ rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count,
+ 1, rte_memory_order_relaxed) - 1;
if (cmd_count == 0)
_notify_cmd(vf, msg_ret);
} else {
--
1.8.3.1
* [PATCH v3 04/45] net/ice: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (2 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 03/45] net/iavf: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 05/45] net/i40e: " Tyler Retzlaff
` (41 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
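One recurring pattern in this patch is a static RTE_ATOMIC() counter used to
generate unique DMA memzone names; a relaxed fetch-add is enough because only
uniqueness of the suffix matters, not ordering. A small sketch under those
assumptions (the function and the "example_dma_" prefix are illustrative,
not ice code):

#include <inttypes.h>
#include <stdio.h>
#include <rte_stdatomic.h>

/* Illustrative name generator for a DMA memzone. */
static void
make_dma_zone_name(char *buf, size_t len)
{
        static RTE_ATOMIC(uint64_t) dma_memzone_id;

        /* relaxed is sufficient: only uniqueness of the suffix matters */
        snprintf(buf, len, "example_dma_%" PRIu64,
                 rte_atomic_fetch_add_explicit(&dma_memzone_id, 1,
                                               rte_memory_order_relaxed));
}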
---
drivers/net/ice/base/ice_osdep.h | 4 ++--
drivers/net/ice/ice_dcf.c | 6 +++---
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 ++++----
drivers/net/ice/ice_dcf_parent.c | 16 ++++++++--------
drivers/net/ice/ice_ethdev.c | 12 ++++++------
drivers/net/ice/ice_ethdev.h | 2 +-
7 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 0e14b93..c17f1bf 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -235,7 +235,7 @@ struct ice_lock {
ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
struct ice_dma_mem *mem, u64 size)
{
- static uint64_t ice_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) ice_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -243,7 +243,7 @@ struct ice_lock {
return NULL;
snprintf(z_name, sizeof(z_name), "ice_dma_%" PRIu64,
- __atomic_fetch_add(&ice_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&ice_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
0, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7f8f516..204d4ea 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -764,7 +764,7 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
rte_spinlock_init(&hw->vc_cmd_queue_lock);
TAILQ_INIT(&hw->vc_cmd_queue);
- __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->vsi_update_thread_num, 0, rte_memory_order_relaxed);
hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
if (hw->arq_buf == NULL) {
@@ -888,8 +888,8 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
ice_dcf_dev_interrupt_handler, hw);
/* Wait for all `ice-thread` threads to exit. */
- while (__atomic_load_n(&hw->vsi_update_thread_num,
- __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(&hw->vsi_update_thread_num,
+ rte_memory_order_acquire) != 0)
rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
ice_dcf_mode_disable(hw);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index aa2a723..7726681 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -105,7 +105,7 @@ struct ice_dcf_hw {
void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen);
- int vsi_update_thread_num;
+ RTE_ATOMIC(int) vsi_update_thread_num;
uint8_t *arq_buf;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index d58ec9d..8f3a385 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -1743,7 +1743,7 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
{
return ad->hw.dcf_enabled &&
- !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+ !rte_atomic_load_explicit(&ad->dcf_state_on, rte_memory_order_relaxed);
}
/* Add UDP tunneling port */
@@ -1944,12 +1944,12 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
return -1;
}
- __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true, rte_memory_order_relaxed);
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 6e845f4..a478b69 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -123,8 +123,8 @@ struct ice_dcf_reset_event_param {
container_of(hw, struct ice_dcf_adapter, real_hw);
struct ice_adapter *parent_adapter = &adapter->parent;
- __atomic_fetch_add(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_relaxed);
rte_thread_detach(rte_thread_self());
@@ -133,8 +133,8 @@ struct ice_dcf_reset_event_param {
rte_spinlock_lock(&vsi_update_lock);
if (!ice_dcf_handle_vsi_update_event(hw)) {
- __atomic_store_n(&parent_adapter->dcf_state_on, true,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true,
+ rte_memory_order_relaxed);
ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
hw->num_vfs, hw->vf_vsi_map);
}
@@ -156,8 +156,8 @@ struct ice_dcf_reset_event_param {
free(param);
- __atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_release);
return 0;
}
@@ -269,8 +269,8 @@ struct ice_dcf_reset_event_param {
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
start_vsi_reset_thread(dcf_hw, true,
pf_msg->event_data.vf_vsi_map.vf_id);
break;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 87385d2..0f35c6a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4062,9 +4062,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = &dev->data->dev_link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
@@ -4078,9 +4078,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1a848b3..6cba643 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -621,7 +621,7 @@ struct ice_adapter {
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
/* True if DCF state of the associated PF is on */
- bool dcf_state_on;
+ RTE_ATOMIC(bool) dcf_state_on;
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
--
1.8.3.1
* [PATCH v3 05/45] net/i40e: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (3 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 04/45] net/ice: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 06/45] net/hns3: " Tyler Retzlaff
` (40 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
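The fence conversions here keep the same acquire semantics: the DD/status
word is loaded first, then an acquire fence orders the loads of the remaining
descriptor words after it. A simplified sketch of that ordering (the rx_desc
layout and the DD bit position are illustrative, not the real i40e
descriptor format):

#include <stdint.h>
#include <rte_stdatomic.h>

/* Illustrative descriptor; the real i40e layout and DD bit differ. */
struct rx_desc {
        uint64_t qword0;
        uint64_t qword1;        /* carries the DD/status bit */
};

static int
read_descriptor(const volatile struct rx_desc *rxdp, struct rx_desc *out)
{
        uint64_t qword1 = rxdp->qword1;

        if (!(qword1 & 0x1))    /* DD not set: descriptor not done yet */
                return 0;

        /* was: rte_atomic_thread_fence(__ATOMIC_ACQUIRE) */
        rte_atomic_thread_fence(rte_memory_order_acquire);

        out->qword0 = rxdp->qword0;     /* ordered after the DD check */
        out->qword1 = qword1;
        return 1;
}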
---
drivers/net/i40e/i40e_ethdev.c | 4 ++--
drivers/net/i40e/i40e_rxtx.c | 6 +++---
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 380ce1a..801cc95 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -4687,7 +4687,7 @@ enum i40e_status_code
u64 size,
u32 alignment)
{
- static uint64_t i40e_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) i40e_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -4695,7 +4695,7 @@ enum i40e_status_code
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
- __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&i40e_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 5d25ab4..155f243 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -486,7 +486,7 @@
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
@@ -745,7 +745,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
@@ -867,7 +867,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index d873e30..3a99137 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -425,7 +425,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
--
1.8.3.1
* [PATCH v3 06/45] net/hns3: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (4 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 05/45] net/i40e: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 07/45] net/bnxt: " Tyler Retzlaff
` (39 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
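hns3 wraps its reset bookkeeping in small bit helpers, and the conversion
maps __atomic_fetch_or/__atomic_fetch_and onto the relaxed
rte_atomic_fetch_*_explicit calls over an RTE_ATOMIC(uint64_t) word. A
standalone sketch of that pattern (helper names here are illustrative; the
driver's versions are hns3_atomic_set_bit() and hns3_test_and_clear_bit() in
hns3_ethdev.h, shown in the diff below):

#include <stdint.h>
#include <rte_stdatomic.h>

static inline void
set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
        rte_atomic_fetch_or_explicit(addr, 1ULL << nr,
                                     rte_memory_order_relaxed);
}

static inline uint64_t
test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
        uint64_t mask = 1ULL << nr;

        /* fetch-and returns the word as it was before the clear */
        return rte_atomic_fetch_and_explicit(addr, ~mask,
                                             rte_memory_order_relaxed) & mask;
}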
---
drivers/net/hns3/hns3_cmd.c | 18 ++++++------
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 +++++++++++------------
drivers/net/hns3/hns3_ethdev.h | 32 ++++++++++-----------
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++++++++++++++++++--------------------
drivers/net/hns3/hns3_intr.c | 36 +++++++++++------------
drivers/net/hns3/hns3_intr.h | 4 +--
drivers/net/hns3/hns3_mbx.c | 6 ++--
drivers/net/hns3/hns3_mp.c | 6 ++--
drivers/net/hns3/hns3_rxtx.c | 10 +++----
drivers/net/hns3/hns3_tm.c | 4 +--
11 files changed, 107 insertions(+), 107 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 001ff49..3c5fdbe 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,12 @@
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
uint64_t size, uint32_t alignment)
{
- static uint64_t hns3_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
- __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
@@ -198,8 +198,8 @@
hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
csq->next_to_use, csq->next_to_clean);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- __atomic_store_n(&hw->reset.disable_cmd, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+ rte_memory_order_relaxed);
hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}
@@ -313,7 +313,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (hns3_cmd_csq_done(hw))
return 0;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw,
"Don't wait for reply because of disable_cmd");
return -EBUSY;
@@ -360,7 +360,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
int retval;
uint32_t ntc;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -747,7 +747,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
ret = -EBUSY;
goto err_cmd_init;
}
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
ret = hns3_cmd_query_firmware_version_and_capability(hw);
if (ret) {
@@ -790,7 +790,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
return 0;
err_cmd_init:
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return ret;
}
@@ -819,7 +819,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (!hns->is_vf)
(void)hns3_firmware_compat_config(hw, false);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
/*
* A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 915e4eb..2f917fe 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,7 @@
* and configured directly to the hardware in the RESET_STAGE_RESTORE
* stage of the reset process.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a..327f6fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ struct hns3_intr_state {
};
static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
@@ -134,7 +134,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
hw->reset.stats.imp_cnt++;
@@ -148,7 +148,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
hw->reset.stats.global_cnt++;
@@ -1151,7 +1151,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* ensure that the hardware configuration remains unchanged before and
* after reset.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
}
@@ -1175,7 +1175,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* we will restore configurations to hardware in hns3_restore_vlan_table
* and hns3_restore_vlan_conf later.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
if (ret) {
hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5059,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5150,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5158,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
return ret;
hw->mac.link_status = RTE_ETH_LINK_DOWN;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -5184,7 +5184,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_tm_dev_stop_proc(hw);
hns3_config_mac_tnl_int(hw, false);
hns3_stop_tqps(hw);
@@ -5577,7 +5577,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
last_req = hns3_get_reset_level(hns, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -5677,7 +5677,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
}
static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
struct hns3_hw *hw = &hns->hw;
enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5737,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* any mailbox handling or command to firmware is only valid
* after hns3_cmd_init is called.
*/
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hw->reset.stats.request_cnt++;
break;
case HNS3_IMP_RESET:
@@ -5792,7 +5792,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* from table space. Hence, for function reset software intervention is
* required to delete the entries
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -5913,10 +5913,10 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5925,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Check if there is any ongoing reset in the hardware. This status can
@@ -6576,7 +6576,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e70c5ff..4c0f076 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -401,17 +401,17 @@ enum hns3_schedule {
struct hns3_reset_data {
enum hns3_reset_stage stage;
- uint16_t schedule;
+ RTE_ATOMIC(uint16_t) schedule;
/* Reset flag, covering the entire reset process */
- uint16_t resetting;
+ RTE_ATOMIC(uint16_t) resetting;
/* Used to disable sending cmds during reset */
- uint16_t disable_cmd;
+ RTE_ATOMIC(uint16_t) disable_cmd;
/* The reset level being processed */
enum hns3_reset_level level;
/* Reset level set, each bit represents a reset level */
- uint64_t pending;
+ RTE_ATOMIC(uint64_t) pending;
/* Request reset level set, from interrupt or mailbox */
- uint64_t request;
+ RTE_ATOMIC(uint64_t) request;
int attempts; /* Reset failure retry */
int retries; /* Timeout failure retry in reset_post */
/*
@@ -499,7 +499,7 @@ struct hns3_hw {
* by dev_set_link_up() or dev_start().
*/
bool set_link_down;
- unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+ RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
struct hns3_tqp_stats tqp_stats;
/* Include Mac stats | Rx stats | Tx stats */
struct hns3_mac_stats mac_stats;
@@ -844,7 +844,7 @@ struct hns3_vf {
struct hns3_adapter *adapter;
/* Whether PF support push link status change to VF */
- uint16_t pf_push_lsc_cap;
+ RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
/*
* If PF support push link status change, VF still need send request to
@@ -853,7 +853,7 @@ struct hns3_vf {
*/
uint16_t req_link_info_cnt;
- uint16_t poll_job_started; /* whether poll job is started */
+ RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
};
struct hns3_adapter {
@@ -997,32 +997,32 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
hns3_read_reg((a)->io_base, (reg))
static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t res;
- res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+ res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) & (1UL << nr)) != 0;
return res;
}
static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_or_explicit(addr, (1UL << nr), rte_memory_order_relaxed);
}
static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_and_explicit(addr, ~(1UL << nr), rte_memory_order_relaxed);
}
static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t mask = (1UL << nr);
- return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+ return rte_atomic_fetch_and_explicit(addr, ~mask, rte_memory_order_relaxed) & mask;
}
int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a..b83d5b9 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
};
static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
@@ -484,7 +484,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* MTU value issued by hns3 VF PMD must be less than or equal to
* PF's MTU.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "Failed to set mtu during resetting");
return -EIO;
}
@@ -565,7 +565,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
val = hns3_read_dev(hw, HNS3_VF_RST_ING);
hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
}
static void
@@ -650,8 +650,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
struct hns3_vf_to_pf_msg req;
- __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+ rte_memory_order_release);
hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* mailbox from PF driver to get this capability.
*/
hns3vf_handle_mbx_msg(hw);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
break;
remain_ms--;
@@ -677,10 +677,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* state: unknown (means pf not ack), not_supported, supported.
* Here config it as 'not_supported' when it's 'unknown' state.
*/
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
hns3_info(hw, "detect PF support push link status change!");
} else {
@@ -920,7 +920,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
bool send_req;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return;
send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +956,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* sending request to PF kernel driver, then could update link status by
* process PF kernel driver's link status mailbox message.
*/
- if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
return;
if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +994,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_hw *hw = &hns->hw;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw,
"vf set vlan id failed during resetting, vlan_id =%u",
vlan_id);
@@ -1059,7 +1059,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
unsigned int tmp_mask;
int ret = 0;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
mask);
return -EIO;
@@ -1252,7 +1252,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
- __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
hns3vf_service_handler(dev);
}
@@ -1264,7 +1264,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rte_eal_alarm_cancel(hns3vf_service_handler, dev);
- __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
}
static int
@@ -1500,10 +1500,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -1528,7 +1528,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1643,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1773,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -1847,7 +1847,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (ret)
return ret;
}
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return 0;
}
@@ -1888,7 +1888,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* from table space. Hence, for function reset software intervention is
* required to delete the entries.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -2030,7 +2030,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
}
static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
enum hns3_reset_level reset_level;
@@ -2070,10 +2070,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2082,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Hardware reset has been notified, we now have to poll & check if
@@ -2278,7 +2278,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30..26fa2eb 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ enum hns3_hw_err_report_type {
static int
hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
- int num, uint64_t *levels,
+ int num, RTE_ATOMIC(uint64_t) *levels,
enum hns3_hw_err_report_type err_type)
{
const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,7 @@ enum hns3_hw_err_report_type {
hw->reset.request = 0;
hw->reset.pending = 0;
hw->reset.resetting = 0;
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
if (!hw->reset.wait_data) {
@@ -2419,8 +2419,8 @@ enum hns3_hw_err_report_type {
/* Reschedule the reset process after successful initialization */
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+ rte_memory_order_relaxed);
return;
}
@@ -2428,15 +2428,15 @@ enum hns3_hw_err_report_type {
return;
/* Schedule restart alarm if it is not scheduled yet */
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_REQUESTED)
return;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED)
rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}
@@ -2453,11 +2453,11 @@ enum hns3_hw_err_report_type {
return;
}
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
SCHEDULE_NONE)
return;
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}
@@ -2537,7 +2537,7 @@ enum hns3_hw_err_report_type {
}
static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
uint64_t merge_cnt = hw->reset.stats.merge_cnt;
uint64_t tmp;
@@ -2633,7 +2633,7 @@ enum hns3_hw_err_report_type {
* Regardless of whether the execution is successful or not, the
* flow after execution must be continued.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
(void)hns3_cmd_init(hw);
reset_fail:
hw->reset.attempts = 0;
@@ -2661,7 +2661,7 @@ enum hns3_hw_err_report_type {
int ret;
if (hw->reset.stage == RESET_STAGE_NONE) {
- __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_DOWN;
hns3_report_reset_begin(hw);
ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2750,7 @@ enum hns3_hw_err_report_type {
hns3_notify_reset_ready(hw, false);
hns3_clear_reset_level(hw, &hw->reset.pending);
hns3_clear_reset_status(hw);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.attempts = 0;
hw->reset.stats.success_cnt++;
hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2812,7 @@ enum hns3_hw_err_report_type {
hw->reset.mbuf_deferred_free = false;
}
rte_spinlock_unlock(&hw->lock);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_NONE;
hns3_clock_gettime(&tv);
timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c07..1edb07d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
};
int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
void hns3_handle_error(struct hns3_adapter *hns);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc16..10c6e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,7 @@
mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
while (wait_time < mbx_time_limit) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw, "Don't wait for mbx response because of "
"disable_cmd");
return -EBUSY;
@@ -382,7 +382,7 @@
rte_spinlock_lock(&hw->cmq.crq.lock);
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
@@ -457,7 +457,7 @@
}
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f194..ba8f8ec 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,7 @@
int i;
if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
- __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+ rte_atomic_load_explicit(&hw->secondary_cnt, rte_memory_order_relaxed) == 0)
return;
if (!mp_req_type_is_valid(type)) {
@@ -277,7 +277,7 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev)
ret);
return ret;
}
- __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
} else {
ret = hns3_mp_init_primary();
if (ret) {
@@ -297,7 +297,7 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
process_data.eth_dev_cnt--;
if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 7e636a0..73a388b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4464,7 +4464,7 @@
struct hns3_adapter *hns = eth_dev->data->dev_private;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+ rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4530,7 +4530,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4586,7 +4586,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4615,7 +4615,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4648,7 +4648,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d969164..92a6685 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
/* don't goto fail_clear, user may try later */
@@ -1141,7 +1141,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
return -EBUSY;
--
1.8.3.1
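For reference, a minimal standalone sketch of the reset-flag idiom the hns3 hunks above convert: a resetting flag that is stored and polled with relaxed atomics while a spinlock orders the data it guards. The struct and function names (hw_state, queue_start, reset_begin) are hypothetical and are not the hns3 driver's own.

/* Sketch only: hypothetical reset flag, not the hns3 driver code. */
#include <errno.h>
#include <stdint.h>
#include <rte_spinlock.h>
#include <rte_stdatomic.h>

struct hw_state {
	rte_spinlock_t lock;		/* assumed initialised with rte_spinlock_init() */
	RTE_ATOMIC(uint16_t) resetting;	/* set by the reset service */
};

static int
queue_start(struct hw_state *hw)
{
	rte_spinlock_lock(&hw->lock);
	/* The flag only gates the operation; relaxed ordering is enough,
	 * the spinlock orders the data it protects. */
	if (rte_atomic_load_explicit(&hw->resetting, rte_memory_order_relaxed)) {
		rte_spinlock_unlock(&hw->lock);
		return -EIO;
	}
	/* ... program the queue ... */
	rte_spinlock_unlock(&hw->lock);
	return 0;
}

static void
reset_begin(struct hw_state *hw)
{
	rte_atomic_store_explicit(&hw->resetting, 1, rte_memory_order_relaxed);
}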
* [PATCH v3 07/45] net/bnxt: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (5 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 06/45] net/hns3: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 08/45] net/cpfl: " Tyler Retzlaff
` (38 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
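For reference, a minimal sketch of the two idioms this patch converts: the acquire fence that follows the completion valid-bit check, and a relaxed allocation-failure counter. The names (cmpl_desc, cmpl_valid, alloc_fail) are hypothetical and do not come from the bnxt driver.

/* Sketch only: hypothetical completion descriptor, not bnxt's. */
#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

struct cmpl_desc {
	RTE_ATOMIC(uint32_t) info3_v;	/* valid bit, written last by hardware */
	uint32_t payload;
};

static RTE_ATOMIC(uint64_t) alloc_fail;	/* statistics only */

static inline bool
cmpl_valid(const struct cmpl_desc *d, uint32_t expected_valid)
{
	uint32_t v = rte_atomic_load_explicit(&d->info3_v,
			rte_memory_order_relaxed) & 1u;

	if (v != expected_valid)
		return false;
	/* Acquire fence: later loads of the completion payload cannot be
	 * hoisted above the valid-bit load. */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	return true;
}

static inline void
count_alloc_failure(void)
{
	/* Counter only, no ordering needed. */
	rte_atomic_fetch_add_explicit(&alloc_fail, 1, rte_memory_order_relaxed);
}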
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/bnxt/bnxt_cpr.h | 4 ++--
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 ++++++++-----
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 ++--
5 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index c7b3480..43f06fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -107,7 +107,7 @@ struct bnxt_cp_ring_info {
/**
* Check validity of a completion ring entry. If the entry is valid, include a
- * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * C11 rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
* completion are not hoisted by the compiler or by the CPU to come before the
* loading of the "valid" field.
*
@@ -130,7 +130,7 @@ struct bnxt_cp_ring_info {
expected = !(raw_cons & ring_size);
valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
if (valid == expected) {
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
return true;
}
return false;
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 77bc382..36e0ac3 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- uint64_t rx_mbuf_alloc_fail;
+ RTE_ATOMIC(uint64_t) rx_mbuf_alloc_fail;
uint8_t need_realloc;
const struct rte_memzone *mz;
};
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 3542975..ca5d2c6 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -49,7 +49,8 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -86,7 +87,8 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -465,7 +467,8 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -1677,8 +1680,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return -ENOMEM;
}
}
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 775400f..04864e0 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -240,7 +240,7 @@
rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
/* Use acquire fence to order loads of descriptor words. */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Reload lower 64b of descriptors to make it ordered after info3_v. */
rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
((void *)&cpr->cp_desc_ring[cons + 7],
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6a6feab..479f819 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -663,7 +663,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -724,7 +724,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
--
1.8.3.1
* [PATCH v3 08/45] net/cpfl: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (6 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 07/45] net/bnxt: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 09/45] net/af_xdp: " Tyler Retzlaff
` (37 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
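For reference, a minimal sketch of the converted statistics pattern: per-queue failure counters summed and reset with relaxed atomics, since nothing else is synchronized through them. The NB_RXQ constant and rxq_stats layout are hypothetical, not the cpfl driver's.

/* Sketch only: hypothetical per-queue stats, not the cpfl driver's. */
#include <stdint.h>
#include <rte_stdatomic.h>

#define NB_RXQ 4

struct rxq_stats {
	RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};

static struct rxq_stats rxq_stats[NB_RXQ];

static uint64_t
total_mbuf_alloc_failed(void)
{
	uint64_t total = 0;
	unsigned int i;

	/* Relaxed loads: the counters are statistics only. */
	for (i = 0; i < NB_RXQ; i++)
		total += rte_atomic_load_explicit(&rxq_stats[i].mbuf_alloc_failed,
				rte_memory_order_relaxed);
	return total;
}

static void
reset_mbuf_alloc_failed(void)
{
	unsigned int i;

	for (i = 0; i < NB_RXQ; i++)
		rte_atomic_store_explicit(&rxq_stats[i].mbuf_alloc_failed, 0,
				rte_memory_order_relaxed);
}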
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/cpfl/cpfl_ethdev.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ef19aa1..5b47e22 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -300,8 +300,9 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed +=
+ rte_atomic_load_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -349,7 +350,8 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
* [PATCH v3 09/45] net/af_xdp: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (7 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 08/45] net/cpfl: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 10/45] net/octeon_ep: " Tyler Retzlaff
` (36 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
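For reference, a minimal sketch of the shared-UMEM reference counting this patch converts: a release store publishes the newly configured object, and acquire loads pair with it before the object is shared. The umem_info struct and helper names are hypothetical, not the af_xdp PMD's.

/* Sketch only: hypothetical shared-buffer registry, not the af_xdp PMD. */
#include <stdint.h>
#include <rte_stdatomic.h>

struct umem_info {
	RTE_ATOMIC(uint8_t) refcnt;	/* 0 = unused, otherwise number of users */
	uint32_t max_xsks;		/* sharing limit */
};

static void
umem_publish(struct umem_info *u)
{
	/* Release: everything initialised before this store is visible to a
	 * thread that later observes refcnt != 0 with an acquire load. */
	rte_atomic_store_explicit(&u->refcnt, 1, rte_memory_order_release);
}

static int
umem_try_share(struct umem_info *u)
{
	if (rte_atomic_load_explicit(&u->refcnt, rte_memory_order_acquire) >=
			u->max_xsks)
		return -1;	/* sharing limit reached, create a new one */
	rte_atomic_fetch_add_explicit(&u->refcnt, 1, rte_memory_order_acquire);
	return 0;
}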
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 268a130..4833180 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -116,7 +116,7 @@ struct xsk_umem_info {
const struct rte_memzone *mz;
struct rte_mempool *mb_pool;
void *buffer;
- uint8_t refcnt;
+ RTE_ATOMIC(uint8_t) refcnt;
uint32_t max_xsks;
};
@@ -995,7 +995,8 @@ static int link_xdp_prog_with_dev(int ifindex, int fd, __u32 flags)
break;
xsk_socket__delete(rxq->xsk);
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1,
+ rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
/* free pkt_tx_queue */
@@ -1097,8 +1098,8 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
ret = -1;
goto out;
}
- if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
- __ATOMIC_ACQUIRE)) {
+ if (rte_atomic_load_explicit(&internals->rx_queues[i].umem->refcnt,
+ rte_memory_order_acquire)) {
*umem = internals->rx_queues[i].umem;
goto out;
}
@@ -1131,11 +1132,11 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
return NULL;
if (umem != NULL &&
- __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
+ rte_atomic_load_explicit(&umem->refcnt, rte_memory_order_acquire) <
umem->max_xsks) {
AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
internals->if_name, rxq->xsk_queue_idx);
- __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&umem->refcnt, 1, rte_memory_order_acquire);
}
}
@@ -1177,7 +1178,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
mb_pool->name, umem->max_xsks);
}
- __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&umem->refcnt, 1, rte_memory_order_release);
}
return umem;
@@ -1606,7 +1607,8 @@ struct msg_internal {
if (rxq->umem == NULL)
return -ENOMEM;
txq->umem = rxq->umem;
- reserve_before = __atomic_load_n(&rxq->umem->refcnt, __ATOMIC_ACQUIRE) <= 1;
+ reserve_before = rte_atomic_load_explicit(&rxq->umem->refcnt,
+ rte_memory_order_acquire) <= 1;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
@@ -1723,7 +1725,7 @@ struct msg_internal {
out_xsk:
xsk_socket__delete(rxq->xsk);
out_umem:
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1, rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
return ret;
--
1.8.3.1
* [PATCH v3 10/45] net/octeon_ep: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (8 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 09/45] net/af_xdp: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 11/45] net/octeontx: " Tyler Retzlaff
` (35 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
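For reference, a minimal sketch of the ISM pattern this patch converts: a pointer into host memory that the device updates is declared RTE_ATOMIC(uint32_t) *, attached with a __rte_atomic-qualified cast, and polled with relaxed loads. The queue struct and helper names are hypothetical, not the octeon_ep PMD's.

/* Sketch only: hypothetical device-updated counter, not the octeon_ep PMD. */
#include <stddef.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct queue {
	RTE_ATOMIC(uint32_t) *cnt_ism;	/* host word the device writes to */
	uint32_t cnt_prev;
	uint32_t cnt;
};

static void
queue_set_ism(struct queue *q, void *ism_base, size_t off)
{
	/* The qualifier on the cast matches the pointer's RTE_ATOMIC() type. */
	q->cnt_ism = (uint32_t __rte_atomic *)((uint8_t *)ism_base + off);
	*q->cnt_ism = 0;
	q->cnt_prev = 0;
}

static void
queue_refresh_count(struct queue *q)
{
	/* The device updates the word asynchronously; a relaxed load is
	 * all the driver needs to pick up the latest value. */
	uint32_t val = rte_atomic_load_explicit(q->cnt_ism,
			rte_memory_order_relaxed);

	q->cnt += val - q->cnt_prev;
	q->cnt_prev = val;
}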
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +++--
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +++--
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx2_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx_ep_common.h | 4 ++--
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 ++++--
6 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.h b/drivers/net/octeon_ep/cnxk_ep_rx.h
index ecf95cd..9422042 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.h
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.h
@@ -98,7 +98,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(droq->pkts_sent_ism, rte_memory_order_relaxed);
new_pkts = val - droq->pkts_sent_prev;
droq->pkts_sent_prev = val;
@@ -111,7 +111,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_tx.c b/drivers/net/octeon_ep/cnxk_ep_tx.c
index 233c8aa..e093140 100644
--- a/drivers/net/octeon_ep/cnxk_ep_tx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_tx.c
@@ -15,7 +15,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(iq->inst_cnt_ism, rte_memory_order_relaxed);
iq->inst_cnt += val - iq->inst_cnt_prev;
iq->inst_cnt_prev = val;
@@ -27,7 +27,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 39f357e..39b28de 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -150,10 +150,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
- (void *)iq->inst_cnt_ism, ism_addr);
+ (void *)(uintptr_t)iq->inst_cnt_ism, ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
@@ -235,10 +235,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
- oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ oq_no, (void *)(uintptr_t)droq->pkts_sent_ism, ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 25e0e5a..2aeebb4 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -300,10 +300,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
- (void *)iq->inst_cnt_ism,
+ (void *)(uintptr_t)iq->inst_cnt_ism,
(unsigned int)ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
@@ -386,10 +386,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
- (void *)droq->pkts_sent_ism,
+ (void *)(uintptr_t)droq->pkts_sent_ism,
(unsigned int)ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 7776940..73eb0c9 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -218,7 +218,7 @@ struct otx_ep_iq_config {
*/
struct otx_ep_instr_queue {
/* Location in memory updated by SDP ISM */
- uint32_t *inst_cnt_ism;
+ RTE_ATOMIC(uint32_t) *inst_cnt_ism;
struct rte_mbuf **mbuf_list;
/* Pointer to the Virtual Base addr of the input ring. */
uint8_t *base_addr;
@@ -413,7 +413,7 @@ struct otx_ep_droq {
uint8_t ism_ena;
/* Pointer to host memory copy of output packet count, set by ISM */
- uint32_t *pkts_sent_ism;
+ RTE_ATOMIC(uint32_t) *pkts_sent_ism;
uint32_t pkts_sent_prev;
/* Statistics for this DROQ. */
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 59144e0..eb2d8c1 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -475,7 +475,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
@@ -871,7 +872,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
--
1.8.3.1
* [PATCH v3 11/45] net/octeontx: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (9 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 10/45] net/octeon_ep: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 12/45] net/cxgbe: " Tyler Retzlaff
` (34 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
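For reference, a minimal sketch of the shared event-device user count this patch converts: every port increments it on probe and the last port to close tears the device down. The evdev_users name and helpers are hypothetical, not the octeontx PMD's.

/* Sketch only: hypothetical shared-device user count, not the octeontx PMD. */
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) evdev_users;	/* ports sharing one event device */

static void
port_probe(void)
{
	rte_atomic_fetch_add_explicit(&evdev_users, 1, rte_memory_order_acquire);
}

static void
port_close(void)
{
	/* fetch_sub returns the old value, so "- 1 == 0" spots the last user. */
	if (rte_atomic_fetch_sub_explicit(&evdev_users, 1,
			rte_memory_order_acquire) - 1 == 0) {
		/* last port closed: stop and close the shared event device */
	}
}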
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/octeontx/octeontx_ethdev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index bec54fd..64d1666 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -31,7 +31,7 @@
/* Useful in stopping/closing event device if no of
* eth ports are using it.
*/
-uint16_t evdev_refcnt;
+RTE_ATOMIC(uint16_t) evdev_refcnt;
#define OCTEONTX_QLM_MODE_SGMII 7
#define OCTEONTX_QLM_MODE_XFI 12
@@ -559,7 +559,7 @@ enum octeontx_link_speed {
return 0;
/* Stopping/closing event device once all eth ports are closed. */
- if (__atomic_fetch_sub(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&evdev_refcnt, 1, rte_memory_order_acquire) - 1 == 0) {
rte_event_dev_stop(nic->evdev);
rte_event_dev_close(nic->evdev);
}
@@ -1593,7 +1593,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
nic->pko_vfid = pko_vfid;
nic->port_id = port;
nic->evdev = evdev;
- __atomic_fetch_add(&evdev_refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&evdev_refcnt, 1, rte_memory_order_acquire);
res = octeontx_port_open(nic);
if (res < 0)
@@ -1844,7 +1844,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
}
}
- __atomic_store_n(&evdev_refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&evdev_refcnt, 0, rte_memory_order_release);
/*
* Do 1:1 links for ports & queues. All queues would be mapped to
* one port. If there are more ports than queues, then some ports
--
1.8.3.1
* [PATCH v3 12/45] net/cxgbe: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (10 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 11/45] net/octeontx: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 13/45] net/gve: " Tyler Retzlaff
` (33 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
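For reference, a minimal sketch of the table-entry reference counting converted across the CLIP/L2T/SMT/MPS code: a refcnt of zero marks a free slot, and all refcnt accesses are relaxed because the entry lock (plus a table-wide lock in the real driver, omitted here) orders the entry contents. The tbl_entry layout and tbl_get() helper are hypothetical.

/* Sketch only: hypothetical lookup table, not the cxgbe CLIP/L2T/SMT code. */
#include <stdint.h>
#include <rte_spinlock.h>
#include <rte_stdatomic.h>

#define TBL_SIZE 64

struct tbl_entry {
	rte_spinlock_t lock;
	RTE_ATOMIC(uint32_t) refcnt;	/* 0 means the slot is free */
	uint32_t key;
};

static struct tbl_entry tbl[TBL_SIZE];

/* Find a matching entry or claim a free one.  The real driver also holds a
 * table-wide write lock around this scan; it is omitted here for brevity. */
static struct tbl_entry *
tbl_get(uint32_t key)
{
	struct tbl_entry *e, *free_e = NULL;
	unsigned int i;

	for (i = 0; i < TBL_SIZE; i++) {
		e = &tbl[i];
		if (rte_atomic_load_explicit(&e->refcnt,
				rte_memory_order_relaxed) == 0) {
			if (free_e == NULL)
				free_e = e;
		} else if (e->key == key) {
			rte_spinlock_lock(&e->lock);
			rte_atomic_fetch_add_explicit(&e->refcnt, 1,
					rte_memory_order_relaxed);
			rte_spinlock_unlock(&e->lock);
			return e;
		}
	}
	if (free_e != NULL) {
		rte_spinlock_lock(&free_e->lock);
		free_e->key = key;
		rte_atomic_store_explicit(&free_e->refcnt, 1,
				rte_memory_order_relaxed);
		rte_spinlock_unlock(&free_e->lock);
	}
	return free_e;
}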
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/cxgbe/clip_tbl.c | 12 ++++++------
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 ++++++++++----------
drivers/net/cxgbe/cxgbe_ofld.h | 6 +++---
drivers/net/cxgbe/l2t.c | 12 ++++++------
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +++++++++++----------
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 ++++++------
drivers/net/cxgbe/smt.h | 2 +-
10 files changed, 46 insertions(+), 45 deletions(-)
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
index b709e26..8588b88 100644
--- a/drivers/net/cxgbe/clip_tbl.c
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -55,7 +55,7 @@ void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
int ret;
t4_os_lock(&ce->lock);
- if (__atomic_fetch_sub(&ce->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ce->refcnt, 1, rte_memory_order_relaxed) - 1 == 0) {
ret = clip6_release_mbox(dev, ce->addr);
if (ret)
dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
@@ -79,7 +79,7 @@ static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
unsigned int clipt_size = c->clipt_size;
for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -114,12 +114,12 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce = find_or_alloc_clipe(ctbl, lip);
if (ce) {
t4_os_lock(&ce->lock);
- if (__atomic_load_n(&ce->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&ce->refcnt, rte_memory_order_relaxed) == 0) {
rte_memcpy(ce->addr, lip, sizeof(ce->addr));
if (v6) {
ce->type = FILTER_TYPE_IPV6;
- __atomic_store_n(&ce->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ce->refcnt, 1,
+ rte_memory_order_relaxed);
ret = clip6_get_mbox(dev, lip);
if (ret)
dev_debug(adap,
@@ -129,7 +129,7 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce->type = FILTER_TYPE_IPV4;
}
} else {
- __atomic_fetch_add(&ce->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ce->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&ce->lock);
}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
index 3b2be66..439fcf6 100644
--- a/drivers/net/cxgbe/clip_tbl.h
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -13,7 +13,7 @@ struct clip_entry {
enum filter_type type; /* entry type */
u32 addr[4]; /* IPV4 or IPV6 address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct clip_tbl {
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index c479454..2ed21f2 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -418,15 +418,15 @@ void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- __atomic_fetch_sub(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
}
@@ -448,15 +448,15 @@ void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
t->tid_tab[tid] = data;
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
- __atomic_fetch_add(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index 33697c7..48a5ec0 100644
--- a/drivers/net/cxgbe/cxgbe_ofld.h
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -60,10 +60,10 @@ struct tid_info {
unsigned int atids_in_use;
/* TIDs in the TCAM */
- u32 tids_in_use;
+ RTE_ATOMIC(u32) tids_in_use;
/* TIDs in the HASH */
- u32 hash_tids_in_use;
- u32 conns_in_use;
+ RTE_ATOMIC(u32) hash_tids_in_use;
+ RTE_ATOMIC(u32) conns_in_use;
rte_spinlock_t atid_lock __rte_cache_aligned;
rte_spinlock_t ftid_lock;
diff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c
index 21f4019..ecb5fec 100644
--- a/drivers/net/cxgbe/l2t.c
+++ b/drivers/net/cxgbe/l2t.c
@@ -14,8 +14,8 @@
*/
void cxgbe_l2t_release(struct l2t_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -112,7 +112,7 @@ static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
struct l2t_entry *first_free = NULL;
for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -151,18 +151,18 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)
dev_debug(adap, "Failed to write L2T entry: %d",
ret);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
diff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h
index e4c0ebe..67d0197 100644
--- a/drivers/net/cxgbe/l2t.h
+++ b/drivers/net/cxgbe/l2t.h
@@ -30,7 +30,7 @@ struct l2t_entry {
u8 lport; /* destination port */
u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct l2t_data {
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
index 8e0da9c..79a7daa 100644
--- a/drivers/net/cxgbe/mps_tcam.c
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -76,7 +76,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- __atomic_fetch_add(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
@@ -98,7 +98,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
entry = &mpstcam->entry[ret];
memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
if (cxgbe_update_free_idx(mpstcam))
@@ -147,7 +147,7 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
* provided value is -1
*/
if (entry->state == MPS_ENTRY_UNUSED) {
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
}
@@ -165,7 +165,7 @@ static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_UNUSED;
}
@@ -190,12 +190,13 @@ int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
return -EINVAL;
}
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
entry->mask, idx, 1, pi->port_id,
false);
else
- ret = __atomic_fetch_sub(&entry->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&entry->refcnt, 1,
+ rte_memory_order_relaxed) - 1;
if (ret == 0) {
reset_mpstcam_entry(entry);
@@ -222,7 +223,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
goto out_unlock;
ret = t4_alloc_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -231,7 +232,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
@@ -253,7 +254,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) != 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) != 1)
goto out_unlock;
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -262,7 +263,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
diff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h
index 363786b..4b421f7 100644
--- a/drivers/net/cxgbe/mps_tcam.h
+++ b/drivers/net/cxgbe/mps_tcam.h
@@ -29,7 +29,7 @@ struct mps_tcam_entry {
u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
};
struct mpstcam_table {
diff --git a/drivers/net/cxgbe/smt.c b/drivers/net/cxgbe/smt.c
index 4e14a73..2f961c1 100644
--- a/drivers/net/cxgbe/smt.c
+++ b/drivers/net/cxgbe/smt.c
@@ -119,7 +119,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end, *first_free = NULL;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -156,7 +156,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_smte(s, smac);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->pfvf = pfvf;
rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
ret = write_smt_entry(dev, e);
@@ -168,9 +168,9 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
goto out_write_unlock;
}
e->state = SMT_STATE_SWITCHING;
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
@@ -195,8 +195,8 @@ struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
void cxgbe_smt_release(struct smt_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/smt.h b/drivers/net/cxgbe/smt.h
index 531810e..8b378ae 100644
--- a/drivers/net/cxgbe/smt.h
+++ b/drivers/net/cxgbe/smt.h
@@ -23,7 +23,7 @@ struct smt_entry {
u16 pfvf;
u16 hw_idx;
u8 src_mac[RTE_ETHER_ADDR_LEN];
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
rte_spinlock_t lock;
};
--
1.8.3.1
* [PATCH v3 13/45] net/gve: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (11 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 12/45] net/cxgbe: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 14/45] net/memif: " Tyler Retzlaff
` (32 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
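For reference, a minimal sketch of the converted pattern: a function-local static atomic counter that hands out unique memzone-style names even when several threads allocate concurrently. The make_dma_zone_name() helper is hypothetical, not the gve osdep code.

/* Sketch only: hypothetical unique-name generator, not the gve osdep code. */
#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

static void
make_dma_zone_name(char *name, size_t len)
{
	/* One shared counter; a relaxed fetch-add is enough to hand out a
	 * unique id to every concurrent caller. */
	static RTE_ATOMIC(uint16_t) zone_id;

	snprintf(name, len, "dma_zone_%u",
		 (unsigned int)rte_atomic_fetch_add_explicit(&zone_id, 1,
				rte_memory_order_relaxed));
}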
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/gve/base/gve_osdep.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/gve/base/gve_osdep.h b/drivers/net/gve/base/gve_osdep.h
index a3702f4..c0ee0d5 100644
--- a/drivers/net/gve/base/gve_osdep.h
+++ b/drivers/net/gve/base/gve_osdep.h
@@ -135,7 +135,7 @@ struct gve_dma_mem {
static inline void *
gve_alloc_dma_mem(struct gve_dma_mem *mem, u64 size)
{
- static uint16_t gve_dma_memzone_id;
+ static RTE_ATOMIC(uint16_t) gve_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -143,7 +143,7 @@ struct gve_dma_mem {
return NULL;
snprintf(z_name, sizeof(z_name), "gve_dma_%u",
- __atomic_fetch_add(&gve_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&gve_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG,
PAGE_SIZE);
--
1.8.3.1
* [PATCH v3 14/45] net/memif: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (12 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 13/45] net/gve: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 15/45] net/thunderx: " Tyler Retzlaff
` (31 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
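For reference, a minimal sketch of the head/tail synchronization this patch converts: each side reads its own index with a relaxed load, reads the peer's index with an acquire load, and publishes progress with a release store. The ring below is a generic single-producer/single-consumer sketch, not the memif descriptor ring.

/* Sketch only: a hypothetical SPSC ring, not the memif ring layout. */
#include <stdint.h>
#include <rte_stdatomic.h>

#define RING_SIZE 256
#define RING_MASK (RING_SIZE - 1)

struct ring {
	RTE_ATOMIC(uint16_t) head;	/* written by the producer */
	RTE_ATOMIC(uint16_t) tail;	/* written by the consumer */
	uint32_t slots[RING_SIZE];
};

static int
ring_enqueue(struct ring *r, uint32_t val)
{
	/* Own index: no synchronization needed against our own stores. */
	uint16_t head = rte_atomic_load_explicit(&r->head, rte_memory_order_relaxed);
	/* Peer index: acquire pairs with the consumer's store-release. */
	uint16_t tail = rte_atomic_load_explicit(&r->tail, rte_memory_order_acquire);

	if ((uint16_t)(head - tail) >= RING_SIZE)
		return -1;		/* full */
	r->slots[head & RING_MASK] = val;
	/* Release makes the slot contents visible before the new head. */
	rte_atomic_store_explicit(&r->head, head + 1, rte_memory_order_release);
	return 0;
}

static int
ring_dequeue(struct ring *r, uint32_t *val)
{
	uint16_t tail = rte_atomic_load_explicit(&r->tail, rte_memory_order_relaxed);
	uint16_t head = rte_atomic_load_explicit(&r->head, rte_memory_order_acquire);

	if (tail == head)
		return -1;		/* empty */
	*val = r->slots[tail & RING_MASK];
	/* Release: the slot has been read before the producer may reuse it. */
	rte_atomic_store_explicit(&r->tail, tail + 1, rte_memory_order_release);
	return 0;
}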
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/memif/memif.h | 4 ++--
drivers/net/memif/rte_eth_memif.c | 50 +++++++++++++++++++--------------------
2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/net/memif/memif.h b/drivers/net/memif/memif.h
index cb72c69..ccaa218 100644
--- a/drivers/net/memif/memif.h
+++ b/drivers/net/memif/memif.h
@@ -169,9 +169,9 @@ typedef struct __rte_packed __rte_aligned(128)
uint32_t cookie; /**< MEMIF_COOKIE */
uint16_t flags; /**< flags */
#define MEMIF_RING_FLAG_MASK_INT 1 /**< disable interrupt mode */
- uint16_t head; /**< pointer to ring buffer head */
+ RTE_ATOMIC(uint16_t) head; /**< pointer to ring buffer head */
MEMIF_CACHELINE_ALIGN_MARK(cacheline1);
- uint16_t tail; /**< pointer to ring buffer tail */
+ RTE_ATOMIC(uint16_t) tail; /**< pointer to ring buffer tail */
MEMIF_CACHELINE_ALIGN_MARK(cacheline2);
memif_desc_t desc[0]; /**< buffer descriptors */
} memif_ring_t;
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 18377d9..16da22b 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -262,7 +262,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* in function eth_memif_rx for C2S queues.
*/
- cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ cur_tail = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
while (mq->last_tail != cur_tail) {
RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);
@@ -334,10 +334,10 @@ struct mp_region_msg {
if (type == MEMIF_RING_C2S) {
cur_slot = mq->last_head;
- last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire);
} else {
cur_slot = mq->last_tail;
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
}
if (cur_slot == last_slot)
@@ -473,7 +473,7 @@ struct mp_region_msg {
no_free_bufs:
if (type == MEMIF_RING_C2S) {
- __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, cur_slot, rte_memory_order_release);
mq->last_head = cur_slot;
} else {
mq->last_tail = cur_slot;
@@ -485,7 +485,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
while (n_slots--) {
@@ -493,7 +493,7 @@ struct mp_region_msg {
d0 = &ring->desc[s0];
d0->length = pmd->run.pkt_buffer_size;
}
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
}
mq->n_pkts += n_rx_pkts;
@@ -541,7 +541,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* to synchronize it between threads.
*/
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
if (cur_slot == last_slot)
goto refill;
n_slots = last_slot - cur_slot;
@@ -591,7 +591,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
if (n_slots < 32)
@@ -620,7 +620,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_tx.
*/
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
mq->n_pkts += n_rx_pkts;
@@ -668,9 +668,9 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot +
- __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
} else {
/* For S2C queues ring->tail is updated by the sender and
* this function is called in the context of sending thread.
@@ -678,8 +678,8 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
- n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
+ slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_relaxed);
+ n_free = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire) - slot;
}
uint16_t i;
@@ -792,9 +792,9 @@ struct mp_region_msg {
no_free_slots:
if (type == MEMIF_RING_C2S)
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
else
- __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, slot, rte_memory_order_release);
if (((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) &&
(rte_intr_fd_get(mq->intr_handle) >= 0)) {
@@ -882,7 +882,7 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot + mq->last_tail;
int used_slots;
@@ -942,7 +942,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_rx for C2S rings.
*/
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
/* Send interrupt, if enabled. */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
@@ -1155,8 +1155,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_c2s_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1175,8 +1175,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_s2c_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1314,8 +1314,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
@@ -1330,8 +1330,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
--
1.8.3.1
* [PATCH v3 15/45] net/thunderx: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (13 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 14/45] net/memif: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 16/45] net/virtio: " Tyler Retzlaff
` (30 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/thunderx/nicvf_rxtx.c | 9 +++++----
drivers/net/thunderx/nicvf_struct.h | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index defa551..2cb6a99 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -374,8 +374,8 @@
NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
(nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));
- next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
- __ATOMIC_ACQUIRE);
+ next_tail = rte_atomic_fetch_add_explicit(&rbdr->next_tail, to_fill,
+ rte_memory_order_acquire);
ltail = next_tail;
for (i = 0; i < to_fill; i++) {
struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);
@@ -385,9 +385,10 @@
ltail++;
}
- rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&rbdr->tail, next_tail,
+ rte_memory_order_relaxed);
- __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&rbdr->tail, ltail, rte_memory_order_release);
nicvf_addr_write(door, to_fill);
return to_fill;
}
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 13cf8fe..6507898 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -20,8 +20,8 @@ struct nicvf_rbdr {
struct rbdr_entry_t *desc;
nicvf_iova_addr_t phys;
uint32_t buffsz;
- uint32_t tail;
- uint32_t next_tail;
+ RTE_ATOMIC(uint32_t) tail;
+ RTE_ATOMIC(uint32_t) next_tail;
uint32_t head;
uint32_t qlen_mask;
} __rte_cache_aligned;
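The only non-mechanical change in this patch is the (uint32_t *)(uintptr_t) cast: rte_wait_until_equal_32() still appears to take a plain uint32_t pointer, so the RTE_ATOMIC() qualifier added to the field has to be cast away at the call site. A rough sketch of the resulting idiom, with a toy queue rather than the nicvf descriptor ring (field and function names are made up for illustration):

#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_pause.h>

struct toy_tail_queue {
        RTE_ATOMIC(uint32_t) tail;
        RTE_ATOMIC(uint32_t) next_tail;
};

static inline void
toy_wait_and_publish(struct toy_tail_queue *q, uint32_t my_turn,
                uint32_t new_tail)
{
        /* rte_wait_until_equal_32() wants a plain pointer, so the
         * RTE_ATOMIC() qualifier is cast away through uintptr_t. */
        rte_wait_until_equal_32((uint32_t *)(uintptr_t)&q->tail, my_turn,
                        rte_memory_order_relaxed);
        /* Publish our descriptors to the next waiter. */
        rte_atomic_store_explicit(&q->tail, new_tail,
                        rte_memory_order_release);
}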
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 16/45] net/virtio: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (14 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 15/45] net/thunderx: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 17/45] net/hinic: " Tyler Retzlaff
` (29 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/virtio/virtio_ring.h | 4 +--
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 ++++-----
drivers/net/virtio/virtqueue.h | 32 ++++++++++++------------
3 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b..2a25751 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -59,7 +59,7 @@ struct vring_used_elem {
struct vring_used {
uint16_t flags;
- uint16_t idx;
+ RTE_ATOMIC(uint16_t) idx;
struct vring_used_elem ring[];
};
@@ -70,7 +70,7 @@ struct vring_packed_desc {
uint64_t addr;
uint32_t len;
uint16_t id;
- uint16_t flags;
+ RTE_ATOMIC(uint16_t) flags;
};
#define RING_EVENT_FLAGS_ENABLE 0x0
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4fdfe70..24e2b2c 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -948,7 +948,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+ uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -1037,8 +1037,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vring->desc[vq->used_idx].flags, flags,
+ rte_memory_order_release);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {
@@ -1057,9 +1057,9 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
struct vring *vring = &dev->vrings.split[queue_idx];
/* Consume avail ring, using used ring idx as first one */
- while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ while (rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
!= vring->avail->idx) {
- avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ avail_idx = rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
& (vring->num - 1);
desc_idx = vring->avail->ring[avail_idx];
@@ -1070,7 +1070,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
uep->id = desc_idx;
uep->len = n_descs;
- __atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&vring->used->idx, 1, rte_memory_order_relaxed);
}
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 5d0c039..b7bbdde 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -37,7 +37,7 @@
virtio_mb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
else
rte_mb();
}
@@ -46,7 +46,7 @@
virtio_rmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
else
rte_io_rmb();
}
@@ -55,7 +55,7 @@
virtio_wmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
else
rte_io_wmb();
}
@@ -67,12 +67,12 @@
uint16_t flags;
if (weak_barriers) {
-/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports
+/* x86 prefers to using rte_io_rmb over rte_atomic_load_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+ flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
#else
flags = dp->flags;
rte_io_rmb();
@@ -90,12 +90,12 @@
uint16_t flags, uint8_t weak_barriers)
{
if (weak_barriers) {
-/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports
+/* x86 prefers to using rte_io_wmb over rte_atomic_store_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
#else
rte_io_wmb();
dp->flags = flags;
@@ -425,7 +425,7 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
if (vq->hw->weak_barriers) {
/**
- * x86 prefers to using rte_smp_rmb over __atomic_load_n as it
+ * x86 prefers to using rte_smp_rmb over rte_atomic_load_explicit as it
* reports a slightly better perf, which comes from the saved
* branch by the compiler.
* The if and else branches are identical with the smp and io
@@ -435,8 +435,8 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
idx = vq->vq_split.ring.used->idx;
rte_smp_rmb();
#else
- idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
- __ATOMIC_ACQUIRE);
+ idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,
+ rte_memory_order_acquire);
#endif
} else {
idx = vq->vq_split.ring.used->idx;
@@ -454,7 +454,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
vq_update_avail_idx(struct virtqueue *vq)
{
if (vq->hw->weak_barriers) {
- /* x86 prefers to using rte_smp_wmb over __atomic_store_n as
+ /* x86 prefers to using rte_smp_wmb over rte_atomic_store_explicit as
* it reports a slightly better perf, which comes from the
* saved branch by the compiler.
* The if and else branches are identical with the smp and
@@ -464,8 +464,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
rte_smp_wmb();
vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
#else
- __atomic_store_n(&vq->vq_split.ring.avail->idx,
- vq->vq_avail_idx, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,
+ vq->vq_avail_idx, rte_memory_order_release);
#endif
} else {
rte_io_wmb();
@@ -528,8 +528,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
- used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
- __ATOMIC_RELAXED); \
+ used_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \
+ rte_memory_order_relaxed); \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
if (virtio_with_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
@@ -546,7 +546,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
" avail.flags=0x%x; used.flags=0x%x", \
(vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
(vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
- __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+ rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \
(vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
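The struct changes in this patch are worth a note: as far as I understand the series, when DPDK is built with the standard C11 atomics backend the rte_atomic_*_explicit() operations require an RTE_ATOMIC()-qualified operand, which is why the vring fields gain the annotation while keeping their layout. A minimal sketch under that assumption (the structure below is illustrative, not the virtio vring):

#include <stdint.h>
#include <rte_stdatomic.h>

struct toy_used_ring {
        uint16_t flags;
        RTE_ATOMIC(uint16_t) idx;       /* index published by the other side */
};

static inline uint16_t
toy_used_idx(struct toy_used_ring *u)
{
        /* Acquire pairs with the release store done by whoever bumps idx. */
        return rte_atomic_load_explicit(&u->idx, rte_memory_order_acquire);
}

static inline void
toy_publish_used(struct toy_used_ring *u, uint16_t new_idx)
{
        rte_atomic_store_explicit(&u->idx, new_idx, rte_memory_order_release);
}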
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 17/45] net/hinic: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (15 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 16/45] net/virtio: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 18/45] net/idpf: " Tyler Retzlaff
` (28 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 7adb6e3..c2cd295 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -1004,7 +1004,7 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
while (pkts < nb_pkts) {
/* 2. current ci is done */
rx_cqe = &rxq->rx_cqe[sw_ci];
- status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
+ status = rte_atomic_load_explicit(&rx_cqe->status, rte_memory_order_acquire);
if (!HINIC_GET_RX_DONE_BE(status))
break;
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 5c30339..d77ef51 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -29,7 +29,7 @@ struct hinic_rq_ctrl {
};
struct hinic_rq_cqe {
- u32 status;
+ RTE_ATOMIC(u32) status;
u32 vlan_len;
u32 offload_type;
u32 rss_hash;
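For context, the acquire load here is what orders the rest of the CQE reads after the hardware's "done" indication. A small sketch of the same pattern with a simplified completion entry (the structure and the done bit below are hypothetical, not the hinic layout):

#include <stdint.h>
#include <rte_stdatomic.h>

struct toy_cqe {
        RTE_ATOMIC(uint32_t) status;    /* written last by the producer */
        uint32_t len;
};

static inline int
toy_cqe_done(struct toy_cqe *cqe, uint32_t *len)
{
        uint32_t status = rte_atomic_load_explicit(&cqe->status,
                        rte_memory_order_acquire);

        if (!(status & 0x1))            /* hypothetical "done" bit */
                return 0;
        /* Safe: this read is ordered after the acquire load above. */
        *len = cqe->len;
        return 1;
}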
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 18/45] net/idpf: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (16 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 17/45] net/hinic: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 19/45] net/qede: " Tyler Retzlaff
` (27 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/idpf/idpf_ethdev.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 86151c9..1df4d6b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -259,8 +259,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed += rte_atomic_load_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -308,7 +308,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&rxq->rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
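The pattern converted here is plain per-queue statistics: relaxed increments on the datapath, relaxed loads and stores when summing or clearing. A minimal sketch, with an illustrative queue structure rather than the idpf one:

#include <stdint.h>
#include <rte_stdatomic.h>

struct toy_rxq {
        RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};

static inline void
toy_rxq_alloc_failed(struct toy_rxq *q)
{
        /* Datapath side: relaxed is enough, nothing else is published. */
        rte_atomic_fetch_add_explicit(&q->mbuf_alloc_failed, 1,
                        rte_memory_order_relaxed);
}

static inline uint64_t
toy_stats_sum_and_clear(struct toy_rxq *qs, unsigned int nb_queues)
{
        uint64_t total = 0;
        unsigned int i;

        for (i = 0; i < nb_queues; i++) {
                total += rte_atomic_load_explicit(&qs[i].mbuf_alloc_failed,
                                rte_memory_order_relaxed);
                rte_atomic_store_explicit(&qs[i].mbuf_alloc_failed, 0,
                                rte_memory_order_relaxed);
        }
        return total;
}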
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 19/45] net/qede: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (17 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 18/45] net/idpf: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 20/45] net/ring: " Tyler Retzlaff
` (26 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/qede/base/bcm_osal.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2edeb38..abd1186 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -51,11 +51,11 @@ void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
/* Counter to track current memzone allocated */
static uint16_t ecore_mz_count;
-static uint32_t ref_cnt;
+static RTE_ATOMIC(uint32_t) ref_cnt;
int ecore_mz_mapping_alloc(void)
{
- if (__atomic_fetch_add(&ref_cnt, 1, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_fetch_add_explicit(&ref_cnt, 1, rte_memory_order_relaxed) == 0) {
ecore_mz_mapping = rte_calloc("ecore_mz_map",
rte_memzone_max_get(), sizeof(struct rte_memzone *), 0);
}
@@ -68,7 +68,7 @@ int ecore_mz_mapping_alloc(void)
void ecore_mz_mapping_free(void)
{
- if (__atomic_fetch_sub(&ref_cnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ref_cnt, 1, rte_memory_order_relaxed) - 1 == 0) {
rte_free(ecore_mz_mapping);
ecore_mz_mapping = NULL;
}
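A short sketch of the reference-count idiom this hunk relies on, since the "== 0" and "- 1 == 0" tests are easy to misread: fetch_add/fetch_sub return the value before the update, so they identify the first and last user respectively. The alloc/release callbacks below stand in for the memzone table handling, and the relaxed ordering mirrors what the patch uses:

#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) toy_ref_cnt;

static inline void
toy_get(void (*alloc)(void))
{
        /* Previous value 0: this caller creates the shared resource. */
        if (rte_atomic_fetch_add_explicit(&toy_ref_cnt, 1,
                        rte_memory_order_relaxed) == 0)
                alloc();
}

static inline void
toy_put(void (*release)(void))
{
        /* Previous value 1 (i.e. "- 1 == 0"): last user frees it. */
        if (rte_atomic_fetch_sub_explicit(&toy_ref_cnt, 1,
                        rte_memory_order_relaxed) - 1 == 0)
                release();
}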
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 20/45] net/ring: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (18 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 19/45] net/qede: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 21/45] vdpa/mlx5: " Tyler Retzlaff
` (25 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/ring/rte_eth_ring.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 48953dd..b16f5d5 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -44,8 +44,8 @@ enum dev_action {
struct ring_queue {
struct rte_ring *rng;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_internals {
@@ -82,7 +82,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts += nb_rx;
else
- __atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->rx_pkts, nb_rx, rte_memory_order_relaxed);
return nb_rx;
}
@@ -96,7 +96,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SP_ENQ)
r->tx_pkts += nb_tx;
else
- __atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->tx_pkts, nb_tx, rte_memory_order_relaxed);
return nb_tx;
}
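The ring PMD only pays for the atomic when the underlying rte_ring allows multiple producers or consumers; otherwise the counter is bumped with a plain add. A reduced sketch of that conditional path (the flag below stands in for the RING_F_SC_DEQ/RING_F_SP_ENQ checks):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct toy_queue_stats {
        RTE_ATOMIC(uint64_t) pkts;
        bool single_threaded;   /* stands in for the ring flag checks */
};

static inline void
toy_count_pkts(struct toy_queue_stats *q, uint64_t n)
{
        if (q->single_threaded)
                q->pkts += n;   /* no concurrent updaters possible */
        else
                rte_atomic_fetch_add_explicit(&q->pkts, n,
                                rte_memory_order_relaxed);
}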
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 21/45] vdpa/mlx5: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (19 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 20/45] net/ring: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 22/45] raw/ifpga: " Tyler Retzlaff
` (24 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +++++++++---------
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +++++------
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 ++-
6 files changed, 52 insertions(+), 44 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@
uint32_t timeout = 0;
/* Check and wait all close tasks done. */
- while (__atomic_load_n(&priv->dev_close_progress,
- __ATOMIC_RELAXED) != 0 && timeout < 1000) {
+ while (rte_atomic_load_explicit(&priv->dev_close_progress,
+ rte_memory_order_relaxed) != 0 && timeout < 1000) {
rte_delay_us_sleep(10000);
timeout++;
}
@@ -294,8 +294,8 @@
priv->last_c_thrd_idx = 0;
else
priv->last_c_thrd_idx++;
- __atomic_store_n(&priv->dev_close_progress,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress,
+ 1, rte_memory_order_relaxed);
if (mlx5_vdpa_task_add(priv,
priv->last_c_thrd_idx,
MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+ rte_memory_order_relaxed);
priv->state = MLX5_VDPA_STATE_PROBED;
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
@@ -664,7 +664,9 @@
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t max_queues, index, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
@@ -847,8 +849,8 @@
if (conf_thread_mng.initializer_priv == priv)
if (mlx5_vdpa_mult_threads_create())
goto error;
- __atomic_fetch_add(&conf_thread_mng.refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+ rte_memory_order_relaxed);
}
if (mlx5_vdpa_create_dev_resources(priv))
goto error;
@@ -937,8 +939,8 @@
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
if (priv->use_c_thread)
- if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
- 1, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+ 1, rte_memory_order_relaxed) == 1)
mlx5_vdpa_mult_threads_destroy(true);
rte_free(priv);
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 7b37c98..0cc67ed 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
struct mlx5_vdpa_task {
struct mlx5_vdpa_priv *priv;
enum mlx5_vdpa_task_type type;
- uint32_t *remaining_cnt;
- uint32_t *err_cnt;
+ RTE_ATOMIC(uint32_t) *remaining_cnt;
+ RTE_ATOMIC(uint32_t) *err_cnt;
uint32_t idx;
} __rte_packed __rte_aligned(4);
@@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
struct mlx5_vdpa_conf_thread_mng {
void *initializer_priv;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
uint32_t max_thrds;
pthread_mutex_t cthrd_lock;
struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
@@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
uint16_t last_c_thrd_idx;
- uint16_t dev_close_progress;
+ RTE_ATOMIC(uint16_t) dev_close_progress;
uint16_t num_mrs; /* Number of memory regions. */
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
@@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num);
int
mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
void
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num)
{
struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@
return -1;
for (i = 0 ; i < num; i++)
if (task[i].remaining_cnt)
- __atomic_fetch_add(task[i].remaining_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+ rte_memory_order_relaxed);
/* wake up conf thread. */
pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@
}
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
{
/* Check and wait all tasks done. */
- while (__atomic_load_n(remaining_cnt,
- __ATOMIC_RELAXED) != 0) {
+ while (rte_atomic_load_explicit(remaining_cnt,
+ rte_memory_order_relaxed) != 0) {
rte_delay_us_sleep(sleep_time);
}
- if (__atomic_load_n(err_cnt,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(err_cnt,
+ rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Tasks done with error.");
return true;
}
@@ -137,8 +137,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to register mr %d.", task.idx);
- __atomic_fetch_add(task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+ rte_memory_order_relaxed);
}
break;
case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to setup virtq %d.", task.idx);
- __atomic_fetch_add(
- task.err_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ task.err_cnt, 1, rte_memory_order_relaxed);
}
virtq->enable = 1;
pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@
DRV_LOG(ERR,
"Failed to stop virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -176,9 +176,9 @@
DRV_LOG(ERR,
"Failed to get negotiated features virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -200,9 +200,9 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(
+ rte_atomic_store_explicit(
&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
break;
case MLX5_VDPA_TASK_PREPARE_VIRTQ:
ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@
DRV_LOG(ERR,
"Failed to prepare virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
break;
default:
@@ -222,8 +222,8 @@
break;
}
if (task.remaining_cnt)
- __atomic_fetch_sub(task.remaining_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+ 1, rte_memory_order_relaxed);
}
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@
uint8_t mode = 0;
int ret = -rte_errno;
uint32_t i, thrd_idx, data[1];
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
(priv->vid, &mode, &priv->vmem_info.size,
&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@
{
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
struct rte_vhost_vring vq;
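Across these files the same bookkeeping shows up: the submitter raises remaining_cnt per queued task, the configuration thread lowers it when a task finishes and raises err_cnt on failure, and the submitter polls until the remaining count reaches zero. A condensed sketch of that flow, using rte_delay_us_sleep() as the patch does (types and names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_cycles.h>

struct toy_task_counters {
        RTE_ATOMIC(uint32_t) remaining;
        RTE_ATOMIC(uint32_t) err;
};

static inline void
toy_task_submitted(struct toy_task_counters *c)
{
        rte_atomic_fetch_add_explicit(&c->remaining, 1,
                        rte_memory_order_relaxed);
}

static inline void
toy_task_done(struct toy_task_counters *c, bool failed)
{
        if (failed)
                rte_atomic_fetch_add_explicit(&c->err, 1,
                                rte_memory_order_relaxed);
        rte_atomic_fetch_sub_explicit(&c->remaining, 1,
                        rte_memory_order_relaxed);
}

static inline bool
toy_wait_all_done(struct toy_task_counters *c, uint32_t sleep_us)
{
        while (rte_atomic_load_explicit(&c->remaining,
                        rte_memory_order_relaxed) != 0)
                rte_delay_us_sleep(sleep_us);
        return rte_atomic_load_explicit(&c->err,
                        rte_memory_order_relaxed) == 0;
}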
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 22/45] raw/ifpga: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (20 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 21/45] vdpa/mlx5: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 23/45] event/opdl: " Tyler Retzlaff
` (23 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/raw/ifpga/ifpga_rawdev.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c
index f89bd3f..78d3c88 100644
--- a/drivers/raw/ifpga/ifpga_rawdev.c
+++ b/drivers/raw/ifpga/ifpga_rawdev.c
@@ -73,7 +73,7 @@
static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
-static int ifpga_monitor_refcnt;
+static RTE_ATOMIC(int) ifpga_monitor_refcnt;
static rte_thread_t ifpga_monitor_start_thread;
static struct ifpga_rawdev *
@@ -512,7 +512,7 @@ static int set_surprise_link_check_aer(
int gsd_enable, ret;
#define MS 1000
- while (__atomic_load_n(&ifpga_monitor_refcnt, __ATOMIC_RELAXED)) {
+ while (rte_atomic_load_explicit(&ifpga_monitor_refcnt, rte_memory_order_relaxed)) {
gsd_enable = 0;
for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
ifpga_rdev = &ifpga_rawdevices[i];
@@ -549,7 +549,7 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 1;
- if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_fetch_add_explicit(&ifpga_monitor_refcnt, 1, rte_memory_order_relaxed)) {
ret = rte_thread_create_internal_control(&ifpga_monitor_start_thread,
"ifpga-mon", ifpga_rawdev_gsd_handle, NULL);
if (ret != 0) {
@@ -573,7 +573,8 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 0;
- if (!(__atomic_fetch_sub(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED) - 1) &&
+ if (!(rte_atomic_fetch_sub_explicit(&ifpga_monitor_refcnt, 1,
+ rte_memory_order_relaxed) - 1) &&
ifpga_monitor_start_thread.opaque_id != 0) {
ret = pthread_cancel((pthread_t)ifpga_monitor_start_thread.opaque_id);
if (ret)
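The refcount here also doubles as the monitor thread's run condition: the worker keeps polling while the count is non-zero, and the value returned by fetch_add tells the caller whether it is the one that must create the thread. A trimmed sketch of that shape, with the polling body elided:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(int) toy_monitor_refcnt;

static uint32_t
toy_monitor_loop(void *arg)
{
        (void)arg;
        /* Run until the last user drops its reference. */
        while (rte_atomic_load_explicit(&toy_monitor_refcnt,
                        rte_memory_order_relaxed)) {
                /* ... poll the devices ... */
        }
        return 0;
}

static inline bool
toy_monitor_ref(void)
{
        /* Previous value 0: the caller must start toy_monitor_loop(). */
        return rte_atomic_fetch_add_explicit(&toy_monitor_refcnt, 1,
                        rte_memory_order_relaxed) == 0;
}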
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 23/45] event/opdl: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (21 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 22/45] raw/ifpga: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 24/45] event/octeontx: " Tyler Retzlaff
` (22 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/opdl/opdl_ring.c | 80 +++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index da5ea02..a86bfb8 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -47,12 +47,12 @@ struct shared_state {
/* Last known minimum sequence number of dependencies, used for multi
* thread operation
*/
- uint32_t available_seq;
+ RTE_ATOMIC(uint32_t) available_seq;
char _pad1[RTE_CACHE_LINE_SIZE * 3];
- uint32_t head; /* Head sequence number (for multi thread operation) */
+ RTE_ATOMIC(uint32_t) head; /* Head sequence number (for multi thread operation) */
char _pad2[RTE_CACHE_LINE_SIZE * 3];
struct opdl_stage *stage; /* back pointer */
- uint32_t tail; /* Tail sequence number */
+ RTE_ATOMIC(uint32_t) tail; /* Tail sequence number */
char _pad3[RTE_CACHE_LINE_SIZE * 2];
} __rte_cache_aligned;
@@ -150,10 +150,10 @@ struct opdl_ring {
available(const struct opdl_stage *s)
{
if (s->threadsafe == true) {
- uint32_t n = __atomic_load_n(&s->shared.available_seq,
- __ATOMIC_ACQUIRE) -
- __atomic_load_n(&s->shared.head,
- __ATOMIC_ACQUIRE);
+ uint32_t n = rte_atomic_load_explicit(&s->shared.available_seq,
+ rte_memory_order_acquire) -
+ rte_atomic_load_explicit(&s->shared.head,
+ rte_memory_order_acquire);
/* Return 0 if available_seq needs to be updated */
return (n <= s->num_slots) ? n : 0;
@@ -169,7 +169,7 @@ struct opdl_ring {
{
uint32_t i;
uint32_t this_tail = s->shared.tail;
- uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+ uint32_t min_seq = rte_atomic_load_explicit(&s->deps[0]->tail, rte_memory_order_acquire);
/* Input stage sequence numbers are greater than the sequence numbers of
* its dependencies so an offset of t->num_slots is needed when
* calculating available slots and also the condition which is used to
@@ -180,16 +180,16 @@ struct opdl_ring {
if (is_input_stage(s)) {
wrap = s->num_slots;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((this_tail - seq) > (this_tail - min_seq))
min_seq = seq;
}
} else {
wrap = 0;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((seq - this_tail) < (min_seq - this_tail))
min_seq = seq;
}
@@ -198,8 +198,8 @@ struct opdl_ring {
if (s->threadsafe == false)
s->available_seq = min_seq + wrap;
else
- __atomic_store_n(&s->shared.available_seq, min_seq + wrap,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.available_seq, min_seq + wrap,
+ rte_memory_order_release);
}
/* Wait until the number of available slots reaches number requested */
@@ -299,7 +299,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -382,18 +382,18 @@ struct opdl_ring {
/* There should be no race condition here. If shared.tail
* matches, no other core can update it until this one does.
*/
- if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&s->shared.tail, rte_memory_order_acquire) ==
tail) {
if (num_entries >= (head - tail)) {
claim_mgr_remove(disclaims);
- __atomic_store_n(&s->shared.tail, head,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, head,
+ rte_memory_order_release);
num_entries -= (head - tail);
} else {
claim_mgr_move_tail(disclaims, num_entries);
- __atomic_store_n(&s->shared.tail,
+ rte_atomic_store_explicit(&s->shared.tail,
num_entries + tail,
- __ATOMIC_RELEASE);
+ rte_memory_order_release);
num_entries = 0;
}
} else if (block == false)
@@ -421,7 +421,7 @@ struct opdl_ring {
opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
false);
- *old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+ *old_head = rte_atomic_load_explicit(&s->shared.head, rte_memory_order_acquire);
while (true) {
bool success;
/* If called by opdl_ring_input(), claim does not need to be
@@ -441,11 +441,10 @@ struct opdl_ring {
if (*num_entries == 0)
return;
- success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+ success = rte_atomic_compare_exchange_weak_explicit(&s->shared.head, old_head,
*old_head + *num_entries,
- true, /* may fail spuriously */
- __ATOMIC_RELEASE, /* memory order on success */
- __ATOMIC_ACQUIRE); /* memory order on fail */
+ rte_memory_order_release, /* memory order on success */
+ rte_memory_order_acquire); /* memory order on fail */
if (likely(success))
break;
rte_pause();
@@ -473,10 +472,11 @@ struct opdl_ring {
/* If another thread started inputting before this one, but hasn't
* finished, we need to wait for it to complete to update the tail.
*/
- rte_wait_until_equal_32(&s->shared.tail, old_head, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&s->shared.tail, old_head,
+ rte_memory_order_acquire);
- __atomic_store_n(&s->shared.tail, old_head + num_entries,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, old_head + num_entries,
+ rte_memory_order_release);
return num_entries;
}
@@ -526,8 +526,8 @@ struct opdl_ring {
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- event = __atomic_load_n(&(ev->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -628,8 +628,8 @@ struct opdl_ring {
num_entries, s->head - old_tail);
num_entries = s->head - old_tail;
}
- __atomic_store_n(&s->shared.tail, num_entries + old_tail,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, num_entries + old_tail,
+ rte_memory_order_release);
}
uint32_t
@@ -658,7 +658,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
@@ -677,7 +677,7 @@ struct opdl_ring {
copy_entries_out(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -756,7 +756,7 @@ struct opdl_ring {
return 0;
}
if (s->threadsafe == false) {
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
s->seq += s->num_claimed;
s->shadow_head = s->head;
s->num_claimed = 0;
@@ -1009,8 +1009,8 @@ struct opdl_ring *
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- event = __atomic_load_n(&(ev_orig->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev_orig->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -1027,9 +1027,9 @@ struct opdl_ring *
if ((event & OPDL_EVENT_MASK) !=
ev_temp) {
- __atomic_store_n(&(ev_orig->event),
- ev_update,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(
+ (uint64_t __rte_atomic *)&ev_orig->event,
+ ev_update, rte_memory_order_release);
ev_updated = true;
}
if (ev_orig->u64 != ev->u64) {
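One conversion here deserves a closer look: the weak compare-exchange may fail spuriously, so it only appears inside a retry loop, and on failure the expected value is refreshed automatically. A stripped-down sketch of the claim loop, mirroring the success/failure orderings used above (ring bounds handling omitted):

#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_pause.h>

static inline uint32_t
toy_claim_slots(RTE_ATOMIC(uint32_t) *head, uint32_t num)
{
        uint32_t old_head = rte_atomic_load_explicit(head,
                        rte_memory_order_acquire);

        /* The weak CAS may fail spuriously; on failure old_head is
         * reloaded with the current value and the loop retries. */
        while (!rte_atomic_compare_exchange_weak_explicit(head, &old_head,
                        old_head + num,
                        rte_memory_order_release,       /* on success */
                        rte_memory_order_acquire))      /* on failure */
                rte_pause();

        return old_head;        /* first sequence number claimed */
}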
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 24/45] event/octeontx: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (22 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 23/45] event/opdl: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 25/45] event/dsw: " Tyler Retzlaff
` (21 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/octeontx/timvf_evdev.h | 8 ++++----
drivers/event/octeontx/timvf_worker.h | 36 +++++++++++++++++------------------
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index cef02cd..4bfc3d7 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -126,15 +126,15 @@ enum timvf_clk_src {
struct tim_mem_bucket {
uint64_t first_chunk;
union {
- uint64_t w1;
+ RTE_ATOMIC(uint64_t) w1;
struct {
- uint32_t nb_entry;
+ RTE_ATOMIC(uint32_t) nb_entry;
uint8_t sbt:1;
uint8_t hbt:1;
uint8_t bsk:1;
uint8_t rsvd:5;
- uint8_t lock;
- int16_t chunk_remainder;
+ RTE_ATOMIC(uint8_t) lock;
+ RTE_ATOMIC(int16_t) chunk_remainder;
};
};
uint64_t current_chunk;
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index e4b923e..de9f1b0 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -19,22 +19,22 @@
static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
- return __atomic_load_n(&bktp->chunk_remainder,
- __ATOMIC_ACQUIRE);
+ return rte_atomic_load_explicit(&bktp->chunk_remainder,
+ rte_memory_order_acquire);
}
static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_store_n(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_fetch_sub(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline uint8_t
@@ -47,14 +47,14 @@
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
- return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_or_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint8_t
@@ -81,34 +81,34 @@
{
/*Clear everything except lock. */
const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
- __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
- __ATOMIC_RELAXED);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
+ rte_memory_order_relaxed);
}
static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit(&bktp->lock, 0xff, rte_memory_order_acq_rel);
}
static inline uint32_t
@@ -121,13 +121,13 @@
static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
}
static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
- __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
}
static inline uint64_t
@@ -135,7 +135,7 @@
{
const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
TIM_BUCKET_W1_S_NUM_ENTRIES);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v;
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
}
static inline struct tim_mem_entry *
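Most of this patch is the fetch_or/fetch_and idiom on the packed 64-bit bucket word, where the returned previous value is what the caller inspects. A small sketch with an arbitrary bit position instead of the TIM_BUCKET_* layout:

#include <stdint.h>
#include <rte_stdatomic.h>

#define TOY_BUSY_BIT (UINT64_C(1) << 32)        /* arbitrary position */

static inline uint64_t
toy_set_busy(RTE_ATOMIC(uint64_t) *w1)
{
        /* Returns the word as it was before the bit was set. */
        return rte_atomic_fetch_or_explicit(w1, TOY_BUSY_BIT,
                        rte_memory_order_acq_rel);
}

static inline uint64_t
toy_clear_busy(RTE_ATOMIC(uint64_t) *w1)
{
        return rte_atomic_fetch_and_explicit(w1, ~TOY_BUSY_BIT,
                        rte_memory_order_acq_rel);
}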
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v3 25/45] event/dsw: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (23 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 24/45] event/octeontx: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 26/45] dma/skeleton: " Tyler Retzlaff
` (20 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
drivers/event/dsw/dsw_evdev.h | 6 +++---
drivers/event/dsw/dsw_event.c | 47 +++++++++++++++++++++++++++---------------
drivers/event/dsw/dsw_xstats.c | 4 ++--
3 files changed, 35 insertions(+), 22 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index d745c89..20431d2 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -227,9 +227,9 @@ struct dsw_port {
struct rte_ring *ctl_in_ring __rte_cache_aligned;
/* Estimate of current port load. */
- int16_t load __rte_cache_aligned;
+ RTE_ATOMIC(int16_t) load __rte_cache_aligned;
/* Estimate of flows currently migrating to this port. */
- int32_t immigration_load __rte_cache_aligned;
+ RTE_ATOMIC(int32_t) immigration_load __rte_cache_aligned;
} __rte_cache_aligned;
struct dsw_queue {
@@ -252,7 +252,7 @@ struct dsw_evdev {
uint8_t num_queues;
int32_t max_inflight;
- int32_t credits_on_loan __rte_cache_aligned;
+ RTE_ATOMIC(int32_t) credits_on_loan __rte_cache_aligned;
};
#define DSW_CTL_PAUS_REQ (0)
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 23488d9..70c3c3a 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -33,7 +33,8 @@
}
total_on_loan =
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed);
available = dsw->max_inflight - total_on_loan;
acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
@@ -45,13 +46,16 @@
* allocation.
*/
new_total_on_loan =
- __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED) + acquired_credits;
+ rte_atomic_fetch_add_explicit(&dsw->credits_on_loan,
+ acquired_credits,
+ rte_memory_order_relaxed) +
+ acquired_credits;
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
- __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan,
+ acquired_credits,
+ rte_memory_order_relaxed);
return false;
}
@@ -77,8 +81,9 @@
port->inflight_credits = leave_credits;
- __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan,
+ return_credits,
+ rte_memory_order_relaxed);
DSW_LOG_DP_PORT(DEBUG, port->id,
"Returned %d tokens to pool.\n",
@@ -156,19 +161,22 @@
int16_t period_load;
int16_t new_load;
- old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
+ old_load = rte_atomic_load_explicit(&port->load,
+ rte_memory_order_relaxed);
period_load = dsw_port_load_close_period(port, now);
new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
(DSW_OLD_LOAD_WEIGHT+1);
- __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->load, new_load,
+ rte_memory_order_relaxed);
/* The load of the recently immigrated flows should hopefully
* be reflected the load estimate by now.
*/
- __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->immigration_load, 0,
+ rte_memory_order_relaxed);
}
static void
@@ -390,10 +398,11 @@ struct dsw_queue_flow_burst {
for (i = 0; i < dsw->num_ports; i++) {
int16_t measured_load =
- __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].load,
+ rte_memory_order_relaxed);
int32_t immigration_load =
- __atomic_load_n(&dsw->ports[i].immigration_load,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
+ rte_memory_order_relaxed);
int32_t load = measured_load + immigration_load;
load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -523,8 +532,10 @@ struct dsw_queue_flow_burst {
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
- __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
- candidate_flow_load, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ &dsw->ports[candidate_port_id].immigration_load,
+ candidate_flow_load,
+ rte_memory_order_relaxed);
return true;
}
@@ -882,7 +893,8 @@ struct dsw_queue_flow_burst {
}
source_port_load =
- __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&source_port->load,
+ rte_memory_order_relaxed);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Load %d is below threshold level %d.\n",
@@ -1301,7 +1313,8 @@ struct dsw_queue_flow_burst {
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
+ rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed) >
source_port->new_event_threshold))
return 0;
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index 2a83a28..f61dfd8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -48,7 +48,7 @@ struct dsw_xstats_port {
static uint64_t
dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
{
- return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
}
static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ struct dsw_xstats_port {
{
int16_t load;
- load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
+ load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
return DSW_LOAD_TO_PERCENT(load);
}
--
1.8.3.1
* [PATCH v3 26/45] dma/skeleton: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (24 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 25/45] event/dsw: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 27/45] crypto/octeontx: " Tyler Retzlaff
` (19 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/dma/skeleton/skeleton_dmadev.c | 5 +++--
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
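[Editor's note] The conversion keeps the original release/acquire pairing on completed_count. Reduced to its essentials, and using hypothetical names with only <rte_stdatomic.h> assumed, the pattern is:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct hw_counters {
	uint64_t submitted_count;
	RTE_ATOMIC(uint64_t) completed_count;
};

/* Worker thread: finish a descriptor, then publish the completion. */
static void
worker_complete_one(struct hw_counters *hw)
{
	/* ... descriptor results are written before this point ... */
	rte_atomic_fetch_add_explicit(&hw->completed_count, 1,
				rte_memory_order_release);
}

/* Control thread: the acquire load pairs with the release increment above,
 * so everything written before the increment is visible after this returns.
 */
static bool
vchan_is_idle(struct hw_counters *hw)
{
	return hw->submitted_count ==
		rte_atomic_load_explicit(&hw->completed_count,
				rte_memory_order_acquire);
}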
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index 48f88f9..926c188 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -142,7 +142,7 @@
else if (desc->op == SKELDMA_OP_FILL)
do_fill(desc);
- __atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&hw->completed_count, 1, rte_memory_order_release);
(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
}
@@ -335,7 +335,8 @@
RTE_SET_USED(vchan);
*status = RTE_DMA_VCHAN_IDLE;
- if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE)
+ if (hw->submitted_count != rte_atomic_load_explicit(&hw->completed_count,
+ rte_memory_order_acquire)
|| hw->zero_req_count == 0)
*status = RTE_DMA_VCHAN_ACTIVE;
return 0;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index c9bf315..3730cbc 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -81,7 +81,7 @@ struct skeldma_hw {
/* Cache delimiter for cpuwork thread's operation data */
char cache2 __rte_cache_aligned;
volatile uint32_t zero_req_count;
- uint64_t completed_count;
+ RTE_ATOMIC(uint64_t) completed_count;
};
#endif /* SKELETON_DMADEV_H */
--
1.8.3.1
* [PATCH v3 27/45] crypto/octeontx: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (25 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 26/45] dma/skeleton: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 28/45] common/mlx5: " Tyler Retzlaff
` (18 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
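[Editor's note] Unlike most patches in this series, this one converts standalone fences rather than atomic accesses. A reduced sketch of the release/acquire fence pairing, with placeholder pointers standing in for the device doorbell and the pending-queue level, and only rte_atomic_thread_fence() from <rte_atomic.h> assumed:

#include <stdint.h>
#include <rte_atomic.h>

static void
submit_work(volatile uint64_t *doorbell, uint64_t req)
{
	/* Make all stores describing the request visible before ringing the doorbell. */
	rte_atomic_thread_fence(rte_memory_order_release);
	*doorbell = req;
}

static uint64_t
poll_pending(const volatile uint64_t *pending)
{
	uint64_t count = *pending;

	/* Order this read before any later reads of the completed data. */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	return count;
}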
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 947e1be..bafd0c1 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -652,7 +652,7 @@
if (!rsp_info->sched_type)
ssows_head_wait(ws);
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
}
@@ -896,7 +896,7 @@
pcount = pending_queue_level(pqueue, DEFAULT_CMD_QLEN);
/* Ensure pcount isn't read before data lands */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
count = (nb_ops > pcount) ? pcount : nb_ops;
--
1.8.3.1
* [PATCH v3 28/45] common/mlx5: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (26 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 27/45] crypto/octeontx: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 29/45] common/idpf: " Tyler Retzlaff
` (17 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/mlx5/linux/mlx5_nl.c | 5 +--
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 ++++-----
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 +++++++++---------
drivers/common/mlx5/mlx5_common_utils.h | 6 ++--
drivers/common/mlx5/mlx5_malloc.c | 58 ++++++++++++++++-----------------
7 files changed, 61 insertions(+), 60 deletions(-)
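[Editor's note] Several of the mlx5 hunks below are the shared reference-count idiom, where fetch_sub returns the pre-decrement value and "- 1 == 0" detects the last reference. As a minimal standalone sketch (hypothetical struct and function names, only <rte_stdatomic.h> assumed):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct shared_obj {
	RTE_ATOMIC(uint32_t) refcnt;
};

static void
obj_get(struct shared_obj *o)
{
	rte_atomic_fetch_add_explicit(&o->refcnt, 1, rte_memory_order_relaxed);
}

/* Returns true when the caller released the last reference. */
static bool
obj_put(struct shared_obj *o)
{
	/* fetch_sub returns the value *before* the subtraction. */
	return rte_atomic_fetch_sub_explicit(&o->refcnt, 1,
				rte_memory_order_relaxed) - 1 == 0;
}

mlx5 can afford relaxed ordering here because the actual teardown paths are serialized by other means (for example the list's rwlock in the hunks below); a refcount that alone guards freeing would normally want a release decrement paired with an acquire on the last-reference path.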
diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 28a1f56..bf6dd19 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -175,10 +175,11 @@ struct mlx5_nl_port_info {
uint16_t state; /**< IB device port state (out). */
};
-uint32_t atomic_sn;
+RTE_ATOMIC(uint32_t) atomic_sn;
/* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
+#define MLX5_NL_SN_GENERATE (rte_atomic_fetch_add_explicit(&atomic_sn, 1, \
+ rte_memory_order_relaxed) + 1)
/**
* Opens a Netlink socket.
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 9c80277..14c70ed 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -195,7 +195,7 @@ enum mlx5_cqe_status {
/* Prevent speculative reading of other fields in CQE until
* CQE is valid.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
op_code == MLX5_CQE_REQ_ERR))
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 85ec10d..50922ad 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -35,7 +35,7 @@ struct mlx5_range {
/** Memory region for a mempool. */
struct mlx5_mempool_mr {
struct mlx5_pmd_mr pmd_mr;
- uint32_t refcnt; /**< Number of mempools sharing this MR. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
};
/* Mempool registration. */
@@ -56,11 +56,11 @@ struct mlx5_mempool_reg {
{
struct mlx5_mprq_buf *buf = opaque;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
rte_mempool_put(buf->mp, buf);
- } else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0)) {
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ } else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0)) {
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
rte_mempool_put(buf->mp, buf);
}
}
@@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {
unsigned int i;
for (i = 0; i < mpr->mrs_n; i++)
- __atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {
bool ret = false;
for (i = 0; i < mpr->mrs_n; i++)
- ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0;
+ ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0;
return ret;
}
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index 8789d40..5bdf48a 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {
/* Multi-Packet RQ buffer header. */
struct mlx5_mprq_buf {
struct rte_mempool *mp;
- uint16_t refcnt; /* Atomically accessed refcnt. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
struct rte_mbuf_ext_shared_info shinfos[];
/*
* Shared information per stride.
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index e69d068..4b95d35 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -81,14 +81,14 @@ struct mlx5_list *
while (entry != NULL) {
if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
if (reuse) {
- ret = __atomic_fetch_add(&entry->ref_cnt, 1,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
+ rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
l_const->name, (void *)entry,
entry->ref_cnt);
} else if (lcore_index < MLX5_LIST_GLOBAL) {
- ret = __atomic_load_n(&entry->ref_cnt,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_load_explicit(&entry->ref_cnt,
+ rte_memory_order_relaxed);
}
if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
return entry;
@@ -151,13 +151,13 @@ struct mlx5_list_entry *
{
struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
- uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
- __ATOMIC_RELAXED);
+ uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
+ rte_memory_order_relaxed);
while (inv_cnt != 0 && entry != NULL) {
struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
- if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
LIST_REMOVE(entry, next);
if (l_const->lcores_share)
l_const->cb_clone_free(l_const->ctx, entry);
@@ -217,7 +217,7 @@ struct mlx5_list_entry *
entry->lcore_idx = (uint32_t)lcore_index;
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
l_const->name, lcore_index,
(void *)entry, entry->ref_cnt);
@@ -254,7 +254,7 @@ struct mlx5_list_entry *
l_inconst->gen_cnt++;
rte_rwlock_write_unlock(&l_inconst->lock);
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
(void *)entry, entry->ref_cnt);
return local_entry;
@@ -285,7 +285,7 @@ struct mlx5_list_entry *
{
struct mlx5_list_entry *gentry = entry->gentry;
- if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
if (entry->lcore_idx == (uint32_t)lcore_idx) {
LIST_REMOVE(entry, next);
@@ -294,23 +294,23 @@ struct mlx5_list_entry *
else
l_const->cb_remove(l_const->ctx, entry);
} else {
- __atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+ 1, rte_memory_order_relaxed);
}
if (!l_const->lcores_share) {
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)entry);
return 0;
}
- if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
rte_rwlock_write_lock(&l_inconst->lock);
if (likely(gentry->ref_cnt == 0)) {
LIST_REMOVE(gentry, next);
rte_rwlock_write_unlock(&l_inconst->lock);
l_const->cb_remove(l_const->ctx, gentry);
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)gentry);
return 0;
@@ -377,7 +377,7 @@ struct mlx5_list_entry *
mlx5_list_get_entry_num(struct mlx5_list *list)
{
MLX5_ASSERT(list);
- return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
}
/********************* Hash List **********************/
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index ae15119..cb4d104 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -29,7 +29,7 @@
*/
struct mlx5_list_entry {
LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
- uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
+ RTE_ATOMIC(uint32_t) ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
uint32_t lcore_idx;
union {
struct mlx5_list_entry *gentry;
@@ -39,7 +39,7 @@ struct mlx5_list_entry {
struct mlx5_list_cache {
LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
- uint32_t inv_cnt; /* Invalid entries counter. */
+ RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
} __rte_cache_aligned;
/**
@@ -111,7 +111,7 @@ struct mlx5_list_const {
struct mlx5_list_inconst {
rte_rwlock_t lock; /* read/write lock. */
volatile uint32_t gen_cnt; /* List modification may update it. */
- volatile uint32_t count; /* number of entries in list. */
+ volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
struct mlx5_list_cache *cache[MLX5_LIST_MAX];
/* Lcore cache, last index is the global cache. */
};
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index c58c41d..ef6dabe 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -16,7 +16,7 @@ struct mlx5_sys_mem {
uint32_t init:1; /* Memory allocator initialized. */
uint32_t enable:1; /* System memory select. */
uint32_t reserve:30; /* Reserve. */
- struct rte_memseg_list *last_msl;
+ RTE_ATOMIC(struct rte_memseg_list *) last_msl;
/* last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
uint64_t malloc_sys;
@@ -93,14 +93,14 @@ struct mlx5_sys_mem {
* different with the cached msl.
*/
if (addr && !mlx5_mem_check_msl(addr,
- (struct rte_memseg_list *)__atomic_load_n
- (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
- __atomic_store_n(&mlx5_sys_mem.last_msl,
+ (struct rte_memseg_list *)rte_atomic_load_explicit
+ (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
+ rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
rte_mem_virt2memseg_list(addr),
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
+ rte_memory_order_relaxed);
#endif
}
}
@@ -122,11 +122,11 @@ struct mlx5_sys_mem {
* to check if the memory belongs to rte memory.
*/
if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
- __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+ rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
if (!rte_mem_virt2memseg_list(addr))
return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
#endif
}
return true;
@@ -185,8 +185,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -199,8 +199,8 @@ struct mlx5_sys_mem {
addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -233,8 +233,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -246,8 +246,8 @@ struct mlx5_sys_mem {
new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -259,14 +259,14 @@ struct mlx5_sys_mem {
return;
if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
+ rte_memory_order_relaxed);
#endif
mlx5_os_free(addr);
} else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
+ rte_memory_order_relaxed);
#endif
rte_free(addr);
}
@@ -280,14 +280,14 @@ struct mlx5_sys_mem {
" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
" update:%"PRIi64"",
- __atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
#endif
}
--
1.8.3.1
* [PATCH v3 29/45] common/idpf: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (27 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 28/45] common/mlx5: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 30/45] common/iavf: " Tyler Retzlaff
` (16 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/idpf/idpf_common_device.h | 6 +++---
drivers/common/idpf/idpf_common_rxtx.c | 14 ++++++++------
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 ++++++++--------
4 files changed, 20 insertions(+), 18 deletions(-)
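[Editor's note] One detail worth noting in the idpf hunk below: the old code used __atomic_compare_exchange(), which takes a pointer to the desired value, while rte_atomic_compare_exchange_strong_explicit() takes the desired value directly (like __atomic_compare_exchange_n). A minimal sketch of the resulting claim-the-command-slot pattern, with hypothetical names and a placeholder for VIRTCHNL2_OP_UNKNOWN:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

#define OP_UNKNOWN 0u /* placeholder for VIRTCHNL2_OP_UNKNOWN */

struct cmd_slot {
	volatile RTE_ATOMIC(uint32_t) pend_cmd;
};

/* Claim the single pending-command slot; fails if a command is already in flight. */
static bool
claim_cmd_slot(struct cmd_slot *ad, uint32_t ops)
{
	uint32_t expected = OP_UNKNOWN;

	return rte_atomic_compare_exchange_strong_explicit(&ad->pend_cmd,
			&expected, ops,
			rte_memory_order_acquire, rte_memory_order_acquire);
}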
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 2b94f03..6a44cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -48,7 +48,7 @@ struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
struct virtchnl2_get_capabilities caps;
- volatile uint32_t pend_cmd; /* pending command not finished */
+ volatile RTE_ATOMIC(uint32_t) pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from cp */
uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
@@ -179,8 +179,8 @@ struct idpf_cmd_info {
atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
{
uint32_t op_unk = VIRTCHNL2_OP_UNKNOWN;
- bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ bool ret = rte_atomic_compare_exchange_strong_explicit(&adapter->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 83b131e..b09c58c 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -592,8 +592,8 @@
next_avail = 0;
rx_bufq->nb_rx_hold -= delta;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
return;
@@ -612,8 +612,8 @@
next_avail += nb_refill;
rx_bufq->nb_rx_hold -= nb_refill;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
}
@@ -1093,7 +1093,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(nmb == NULL)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -1203,7 +1204,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index b49b1ed..eeeeed1 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -97,7 +97,7 @@
#define IDPF_RX_SPLIT_BUFQ2_ID 2
struct idpf_rx_stats {
- uint64_t mbuf_alloc_failed;
+ RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
struct idpf_rx_queue {
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index f65e8d5..3b5e124 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -38,8 +38,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -168,8 +168,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
@@ -564,8 +564,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
@@ -638,8 +638,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
--
1.8.3.1
* [PATCH v3 30/45] common/iavf: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (28 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 29/45] common/idpf: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 31/45] baseband/acc: " Tyler Retzlaff
` (15 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/iavf/iavf_impl.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
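[Editor's note] The change is a single atomically incremented counter used to build unique memzone names. As a standalone sketch of the same idiom (hypothetical helper, only <rte_stdatomic.h> and snprintf assumed):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

/* Build a process-unique zone name; safe to call concurrently because the
 * relaxed fetch_add still hands every caller a distinct value.
 */
static void
make_zone_name(char *buf, size_t len)
{
	static RTE_ATOMIC(uint64_t) zone_id;

	snprintf(buf, len, "iavf_dma_%" PRIu64,
		 rte_atomic_fetch_add_explicit(&zone_id, 1,
				rte_memory_order_relaxed));
}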
diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
index 8919b0e..c0ff301 100644
--- a/drivers/common/iavf/iavf_impl.c
+++ b/drivers/common/iavf/iavf_impl.c
@@ -18,7 +18,7 @@ enum iavf_status
u64 size,
u32 alignment)
{
- static uint64_t iavf_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) iavf_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -26,7 +26,7 @@ enum iavf_status
return IAVF_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "iavf_dma_%" PRIu64,
- __atomic_fetch_add(&iavf_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&iavf_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
--
1.8.3.1
* [PATCH v3 31/45] baseband/acc: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (29 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 30/45] common/iavf: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 32/45] net/txgbe: " Tyler Retzlaff
` (14 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 34 deletions(-)
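[Editor's note] The acc hunks are all the same shape: the 64-bit descriptor header written by hardware is loaded with relaxed ordering, and because the ring descriptors are plain structs, the pointer is cast with the __rte_atomic qualifier to satisfy rte_atomic_load_explicit(). A reduced sketch, with a placeholder done bit, a hypothetical header layout, and hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

#define DESC_FDONE 0x1u /* placeholder for the hardware "frame done" bit */

union desc_hdr {
	uint64_t atom_hdr;
	struct {
		uint32_t val;
		uint32_t add_info;
	} rsp;
};

/* Poll a descriptor word written by hardware; the cast only adds the
 * atomic qualifier, it does not change the memory being read.
 */
static bool
desc_is_done(uint64_t *desc_word)
{
	union desc_hdr hdr;

	hdr.atom_hdr = rte_atomic_load_explicit(
			(uint64_t __rte_atomic *)desc_word,
			rte_memory_order_relaxed);
	return (hdr.rsp.val & DESC_FDONE) != 0;
}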
diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
index 4f666e5..ee50b9c 100644
--- a/drivers/baseband/acc/rte_acc100_pmd.c
+++ b/drivers/baseband/acc/rte_acc100_pmd.c
@@ -3673,8 +3673,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3728,8 +3728,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3742,8 +3742,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3755,8 +3755,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n",
desc, rsp.val, descs_in_tb, desc->req.numCBs);
@@ -3793,8 +3793,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3846,8 +3846,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3902,8 +3902,8 @@
uint8_t cbs_in_tb = 1, cb_idx = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3919,8 +3919,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3930,8 +3930,8 @@
/* Read remaining CBs if exists */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
desc, rsp.val, cb_idx, cbs_in_tb);
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 88b1104..f7c54be 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -3119,7 +3119,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
return -1;
@@ -3157,7 +3158,8 @@
struct rte_bbdev_enc_op *op;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3192,7 +3194,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + 1 > max_requested_ops)
return -1;
@@ -3208,7 +3211,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3220,7 +3224,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
@@ -3246,7 +3251,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3290,7 +3296,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3346,7 +3353,8 @@
uint32_t tb_crc_check = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3362,7 +3370,8 @@
/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.
* If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3372,7 +3381,8 @@
/* Read remaining CBs if exists. */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
rsp.val, desc->rsp.add_info_0,
@@ -3790,7 +3800,8 @@
struct rte_bbdev_fft_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4116,7 +4127,8 @@
uint8_t descs_in_op, i;
desc = acc_desc_tail(q, dequeued_ops);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4127,7 +4139,8 @@
/* Get last CB. */
last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1);
/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -4137,8 +4150,8 @@
for (i = 1; i < descs_in_op - 1; i++) {
last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
& q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit(
+ (uint64_t __rte_atomic *)last_desc, rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
}
@@ -4154,7 +4167,8 @@
for (i = 0; i < descs_in_op; i++) {
desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
--
1.8.3.1
* [PATCH v3 32/45] net/txgbe: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (30 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 31/45] baseband/acc: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 33/45] net/null: " Tyler Retzlaff
` (13 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/txgbe/txgbe_ethdev.c | 12 +++++++-----
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
3 files changed, 9 insertions(+), 7 deletions(-)
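[Editor's note] This patch also changes how the guard flag is set: __atomic_test_and_set()/__atomic_clear() operate on a single byte and have no direct counterpart among the rte_atomic_* wrappers, so the driver keeps link_thread_running as a uint32_t and uses exchange/store instead. A minimal sketch of the resulting single-thread guard (hypothetical names):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) link_thread_running;

/* Try to become the one link-setup thread; exchange returns the previous
 * value, so 0 means this caller won the race.
 */
static bool
link_thread_try_start(void)
{
	return rte_atomic_exchange_explicit(&link_thread_running, 1,
				rte_memory_order_seq_cst) == 0;
}

static void
link_thread_done(void)
{
	rte_atomic_store_explicit(&link_thread_running, 0,
				rte_memory_order_seq_cst);
}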
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b75e889..a58f197 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -595,7 +595,7 @@ static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -2834,7 +2834,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev);
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -2859,7 +2859,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
rte_thread_detach(rte_thread_self());
txgbe_dev_setup_link_alarm_handler(dev);
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -2908,7 +2908,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
} else if (hw->phy.media_type == txgbe_media_type_fiber &&
dev->data->dev_conf.intr_conf.lsc != 0) {
txgbe_dev_wait_setup_link_complete(dev, 0);
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the TXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -2918,7 +2919,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
"txgbe-link",
txgbe_dev_setup_link_thread_handler, dev) < 0) {
PMD_DRV_LOG(ERR, "Create link thread failed!");
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 7e8067c..e8f55f7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -372,7 +372,7 @@ struct txgbe_adapter {
/* For RSS reta table update */
uint8_t rss_reta_updated;
- uint32_t link_thread_running;
+ RTE_ATOMIC(uint32_t) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index f1341fb..1abc190 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -206,7 +206,7 @@ static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->device_id = pci_dev->id.device_id;
--
1.8.3.1
* [PATCH v3 33/45] net/null: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (31 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 32/45] net/txgbe: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 34/45] event/dlb2: " Tyler Retzlaff
` (12 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/null/rte_eth_null.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
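[Editor's note] The null PMD change is the simplest case in the series: plain packet counters bumped with seq_cst, preserving the behaviour of the original __ATOMIC_SEQ_CST code (the in-tree "review for potential ordering optimization" notes hint that relaxed would likely do for a pure statistic). A minimal sketch with hypothetical names:

#include <stdint.h>
#include <rte_stdatomic.h>

struct queue_stats {
	RTE_ATOMIC(uint64_t) rx_pkts;
	RTE_ATOMIC(uint64_t) tx_pkts;
};

/* Per-burst statistics update, readable concurrently from the stats path. */
static void
count_rx_burst(struct queue_stats *s, uint16_t nb)
{
	rte_atomic_fetch_add_explicit(&s->rx_pkts, nb, rte_memory_order_seq_cst);
}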
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 7c46004..f4ed3b8 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -37,8 +37,8 @@ struct null_queue {
struct rte_mempool *mb_pool;
struct rte_mbuf *dummy_packet;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_options {
@@ -102,7 +102,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -130,7 +130,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -155,7 +155,7 @@ struct pmd_internals {
rte_pktmbuf_free(bufs[i]);
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -178,7 +178,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
--
1.8.3.1
* [PATCH v3 34/45] event/dlb2: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (32 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 33/45] net/null: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 35/45] dma/idxd: " Tyler Retzlaff
` (11 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/dlb2/dlb2.c | 34 +++++++++++++++++-----------------
drivers/event/dlb2/dlb2_priv.h | 15 +++++++--------
drivers/event/dlb2/dlb2_xstats.c | 2 +-
3 files changed, 25 insertions(+), 26 deletions(-)
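[Editor's note] The dlb2 credit pool is drawn from with a single compare-and-swap attempt rather than a retry loop. A reduced sketch of that pattern (hypothetical function name; seq_cst kept to match the driver):

#include <stdint.h>
#include <rte_stdatomic.h>

/* Try to take up to 'batch' credits from a shared pool; returns the number
 * taken, or 0 if the one CAS attempt lost a race or the pool is empty.
 */
static uint16_t
take_hw_credits(RTE_ATOMIC(uint32_t) *pool, uint16_t batch)
{
	uint32_t avail = rte_atomic_load_explicit(pool, rte_memory_order_seq_cst);

	if (avail < batch)
		batch = (uint16_t)avail;
	if (batch != 0 &&
	    rte_atomic_compare_exchange_strong_explicit(pool, &avail,
			avail - batch,
			rte_memory_order_seq_cst, rte_memory_order_seq_cst))
		return batch;
	return 0;
}

A caller that must make progress would loop, retrying with the updated expected value on failure; the sketch, like the driver hunk below, makes only one attempt.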
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 628ddef..0b91f03 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1005,7 +1005,7 @@ struct process_local_port_data
}
dlb2->new_event_limit = config->nb_events_limit;
- __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&dlb2->inflights, 0, rte_memory_order_seq_cst);
/* Save number of ports/queues for this event dev */
dlb2->num_ports = config->nb_event_ports;
@@ -2668,10 +2668,10 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
batch_size = credits;
if (likely(credits &&
- __atomic_compare_exchange_n(
+ rte_atomic_compare_exchange_strong_explicit(
qm_port->credit_pool[type],
- &credits, credits - batch_size, false,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+ &credits, credits - batch_size,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst)))
return batch_size;
else
return 0;
@@ -2687,7 +2687,7 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
/* Replenish credits, saving one quanta for enqueues */
uint16_t val = ev_port->inflight_credits - quanta;
- __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
ev_port->inflight_credits -= val;
}
}
@@ -2696,8 +2696,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_port *ev_port)
{
- uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
- __ATOMIC_SEQ_CST);
+ uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
+ rte_memory_order_seq_cst);
const int num = 1;
if (unlikely(ev_port->inflight_max < sw_inflights)) {
@@ -2719,8 +2719,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
return 1;
}
- __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
+ rte_memory_order_seq_cst);
ev_port->inflight_credits += (credit_update_quanta);
if (ev_port->inflight_credits < num) {
@@ -3234,17 +3234,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_ldb_credits += num;
if (qm_port->cached_ldb_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_LDB_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_ldb_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
@@ -3252,17 +3252,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_dir_credits += num;
if (qm_port->cached_dir_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_DIR_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_dir_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index 31a3bee..4ff340d 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -348,7 +348,7 @@ struct dlb2_port {
uint32_t dequeue_depth;
enum dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
- uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) *credit_pool[DLB2_NUM_QUEUE_TYPES];
union {
struct {
uint16_t cached_ldb_credits;
@@ -586,7 +586,7 @@ struct dlb2_eventdev {
uint32_t xstats_count_mode_dev;
uint32_t xstats_count_mode_port;
uint32_t xstats_count;
- uint32_t inflights; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) inflights;
uint32_t new_event_limit;
int max_num_events_override;
int num_dir_credits_override;
@@ -623,15 +623,14 @@ struct dlb2_eventdev {
struct {
uint16_t max_ldb_credits;
uint16_t max_dir_credits;
- /* use __atomic builtins */ /* shared hw cred */
- uint32_t ldb_credit_pool __rte_cache_aligned;
- /* use __atomic builtins */ /* shared hw cred */
- uint32_t dir_credit_pool __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) ldb_credit_pool
+ __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) dir_credit_pool
+ __rte_cache_aligned;
};
struct {
uint16_t max_credits;
- /* use __atomic builtins */ /* shared hw cred */
- uint32_t credit_pool __rte_cache_aligned;
+ RTE_ATOMIC(uint32_t) credit_pool __rte_cache_aligned;
};
};
uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index ff15271..22094f3 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -173,7 +173,7 @@ struct dlb2_xstats_entry {
case nb_events_limit:
return dlb2->new_event_limit;
case inflight_events:
- return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
+ return rte_atomic_load_explicit(&dlb2->inflights, rte_memory_order_seq_cst);
case ldb_pool_size:
return dlb2->num_ldb_credits;
case dir_pool_size:
--
1.8.3.1
* [PATCH v3 35/45] dma/idxd: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (33 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 34/45] event/dlb2: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 36/45] crypto/ccp: " Tyler Retzlaff
` (10 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
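The ref_count change below keeps the usual last-reference idiom, only spelled with the rte_
API. A minimal sketch of that idiom with a hypothetical refcount (not the driver's actual
teardown path):

#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) ref_count;	/* hypothetical refcount */

static void
put_ref(void)
{
	/* fetch_sub returns the value before the decrement, so seeing 1
	 * means this caller just released the last reference
	 */
	if (rte_atomic_fetch_sub_explicit(&ref_count, 1,
			rte_memory_order_seq_cst) == 1) {
		/* last user: tear down the shared state here */
	}
}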
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index cd41777..537cf9b 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -33,7 +33,7 @@ struct idxd_pci_common {
rte_spinlock_t lk;
uint8_t wq_cfg_sz;
- uint16_t ref_count;
+ RTE_ATOMIC(uint16_t) ref_count;
volatile struct rte_idxd_bar0 *regs;
volatile uint32_t *wq_regs_base;
volatile struct rte_idxd_grpcfg *grp_regs;
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index a78889a..06fa115 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -136,7 +136,8 @@
* the PCI struct
*/
/* NOTE: review for potential ordering optimization */
- is_last_wq = (__atomic_fetch_sub(&idxd->u.pci->ref_count, 1, __ATOMIC_SEQ_CST) == 1);
+ is_last_wq = (rte_atomic_fetch_sub_explicit(&idxd->u.pci->ref_count, 1,
+ rte_memory_order_seq_cst) == 1);
if (is_last_wq) {
/* disable the device */
err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -330,9 +331,9 @@
return ret;
}
qid = rte_dma_get_dev_id_by_name(qname);
- max_qid = __atomic_load_n(
+ max_qid = rte_atomic_load_explicit(
&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
- __ATOMIC_SEQ_CST);
+ rte_memory_order_seq_cst);
/* we have queue 0 done, now configure the rest of the queues */
for (qid = 1; qid < max_qid; qid++) {
@@ -389,7 +390,7 @@
free(idxd.u.pci);
return ret;
}
- __atomic_fetch_add(&idxd.u.pci->ref_count, 1, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&idxd.u.pci->ref_count, 1, rte_memory_order_seq_cst);
}
return 0;
--
1.8.3.1
* [PATCH v3 36/45] crypto/ccp: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (34 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 35/45] dma/idxd: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 37/45] common/cpt: " Tyler Retzlaff
` (9 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
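This driver keeps its bitmap declared as plain unsigned long, so the call sites cast to an
__rte_atomic-qualified pointer instead of changing the declaration. A minimal sketch of that
casting style (hypothetical bitmap, not the driver's layout):

#include <rte_stdatomic.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long bitmap[4];	/* hypothetical, deliberately not RTE_ATOMIC() */

static inline void
set_bit(unsigned int n)
{
	rte_atomic_fetch_or_explicit(
		(unsigned long __rte_atomic *)&bitmap[n / BITS_PER_WORD],
		1UL << (n % BITS_PER_WORD), rte_memory_order_seq_cst);
}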
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/crypto/ccp/ccp_dev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index b7ca3af..41c1422 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -116,15 +116,15 @@ struct ccp_queue *
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ (1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_and_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ ~(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline uint32_t
--
1.8.3.1
* [PATCH v3 37/45] common/cpt: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (35 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 36/45] crypto/ccp: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 38/45] bus/vmbus: " Tyler Retzlaff
` (8 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
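The only change in this patch is the fence argument; the release fence still orders the
entry writes before the tail update. A minimal sketch of that publish step using a
hypothetical queue type (not the cpt structures):

#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_queue {	/* hypothetical single-producer queue */
	void *entries[256];
	uint32_t tail;
};

static inline void
demo_publish(struct demo_queue *q, uint32_t cnt, uint32_t qsize)
{
	/* writes to q->entries[] made before this fence are visible to a
	 * consumer that observes the new tail with acquire semantics
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
	q->tail = (q->tail + cnt) & (qsize - 1);
}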
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/cpt/cpt_common.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index d70668a..dc79e3a 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -73,7 +73,7 @@ struct cpt_request_info {
const unsigned int qsize)
{
/* Ensure ordering between setting the entry and updating the tail */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
q->tail = (q->tail + cnt) & (qsize - 1);
}
--
1.8.3.1
* [PATCH v3 38/45] bus/vmbus: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (36 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 37/45] common/cpt: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 39/45] examples: " Tyler Retzlaff
` (7 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
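Here the helper's parameter type changes as well: a pointer to atomic storage carries the
RTE_ATOMIC() specifier through the function signature. A minimal sketch of that shape
(hypothetical helper name):

#include <rte_stdatomic.h>

/* hypothetical helper; the atomic specifier appears in the parameter type */
static inline void
demo_set_bits(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
	rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}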
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
drivers/bus/vmbus/vmbus_channel.c | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h
index a17ce40..e3299aa 100644
--- a/drivers/bus/vmbus/rte_vmbus_reg.h
+++ b/drivers/bus/vmbus/rte_vmbus_reg.h
@@ -28,7 +28,7 @@ struct vmbus_message {
*/
struct vmbus_mon_trig {
- uint32_t pending;
+ RTE_ATOMIC(uint32_t) pending;
uint32_t armed;
} __rte_packed;
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index 4d74df3..925c2aa 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -19,16 +19,16 @@
#include "private.h"
static inline void
-vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+vmbus_sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
- /* Use GCC builtin which atomic does atomic OR operation */
- __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}
static inline void
vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
{
- uint32_t *monitor_addr, monitor_mask;
+ RTE_ATOMIC(uint32_t) *monitor_addr;
+ uint32_t monitor_mask;
unsigned int trigger_index;
trigger_index = monitor_id / HV_MON_TRIG_LEN;
--
1.8.3.1
* [PATCH v3 39/45] examples: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (37 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 38/45] bus/vmbus: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 40/45] app/dumpcap: " Tyler Retzlaff
` (6 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
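One wrinkle in the examples below: __atomic_compare_exchange_n took a weak/strong bool,
while the rte_ API encodes that choice in the function name, so the extra argument simply
disappears. A minimal sketch of the "single display core" gate used by several examples
(hypothetical names):

#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) display_owner;	/* hypothetical gate */

static bool
try_become_display_core(void)
{
	uint32_t expected = 0;

	/* strong CAS: succeeds only for the first caller that sees 0 */
	return rte_atomic_compare_exchange_strong_explicit(&display_owner,
			&expected, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed);
}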
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
examples/bbdev_app/main.c | 13 +++++----
examples/l2fwd-event/l2fwd_common.h | 4 +--
examples/l2fwd-event/l2fwd_event.c | 24 ++++++++--------
examples/l2fwd-jobstats/main.c | 11 ++++----
.../client_server_mp/mp_server/main.c | 6 ++--
examples/server_node_efd/efd_server/main.c | 6 ++--
examples/vhost/main.c | 32 +++++++++++-----------
examples/vhost/main.h | 4 +--
examples/vhost/virtio_net.c | 13 +++++----
examples/vhost_blk/vhost_blk.c | 8 +++---
examples/vm_power_manager/channel_monitor.c | 9 +++---
11 files changed, 68 insertions(+), 62 deletions(-)
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 16599ae..214fdf2 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -165,7 +165,7 @@ struct stats_lcore_params {
.num_dec_cores = 1,
};
-static uint16_t global_exit_flag;
+static RTE_ATOMIC(uint16_t) global_exit_flag;
/* display usage */
static inline void
@@ -277,7 +277,7 @@ uint16_t bbdev_parse_number(const char *mask)
signal_handler(int signum)
{
printf("\nSignal %d received\n", signum);
- __atomic_store_n(&global_exit_flag, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 1, rte_memory_order_relaxed);
}
static void
@@ -321,7 +321,8 @@ uint16_t bbdev_parse_number(const char *mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME &&
- !__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED); count++) {
+ !rte_atomic_load_explicit(&global_exit_flag,
+ rte_memory_order_relaxed); count++) {
memset(&link, 0, sizeof(link));
link_get_err = rte_eth_link_get_nowait(port_id, &link);
@@ -675,7 +676,7 @@ uint16_t bbdev_parse_number(const char *mask)
{
struct stats_lcore_params *stats_lcore = arg;
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
print_stats(stats_lcore);
rte_delay_ms(500);
}
@@ -921,7 +922,7 @@ uint16_t bbdev_parse_number(const char *mask)
const bool run_decoder = (lcore_conf->core_type &
(1 << RTE_BBDEV_OP_TURBO_DEC));
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
if (run_encoder)
run_encoding(lcore_conf);
if (run_decoder)
@@ -1055,7 +1056,7 @@ uint16_t bbdev_parse_number(const char *mask)
.align = alignof(struct rte_mbuf *),
};
- __atomic_store_n(&global_exit_flag, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 0, rte_memory_order_relaxed);
sigret = signal(SIGTERM, signal_handler);
if (sigret == SIG_ERR)
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index 07f84cb..3d2e303 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -61,8 +61,8 @@
/* Per-port statistics struct */
struct l2fwd_port_statistics {
uint64_t dropped;
- uint64_t tx;
- uint64_t rx;
+ RTE_ATOMIC(uint64_t) tx;
+ RTE_ATOMIC(uint64_t) rx;
} __rte_cache_aligned;
/* Event vector attributes */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 4b5a032..2247202 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -163,8 +163,8 @@
dst_port = rsrc->dst_ports[mbuf->port];
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].rx,
+ 1, rte_memory_order_relaxed);
mbuf->port = dst_port;
if (flags & L2FWD_EVENT_UPDT_MAC)
@@ -179,8 +179,8 @@
rte_event_eth_tx_adapter_txq_set(mbuf, 0);
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].tx,
+ 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -367,8 +367,8 @@
vec->queue = 0;
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbufs[0]->port].rx,
+ vec->nb_elem, rte_memory_order_relaxed);
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (j < vec->nb_elem)
@@ -382,14 +382,14 @@
}
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[vec->port].tx,
+ vec->nb_elem, rte_memory_order_relaxed);
} else {
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].rx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (j < vec->nb_elem)
rte_prefetch0(
@@ -406,9 +406,9 @@
rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].tx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
}
}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 2653db4..9a094ef 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -80,7 +80,7 @@ struct lcore_queue_conf {
struct rte_jobstats idle_job;
struct rte_jobstats_context jobs_context;
- uint16_t stats_read_pending;
+ RTE_ATOMIC(uint16_t) stats_read_pending;
rte_spinlock_t lock;
} __rte_cache_aligned;
/* >8 End of list of queues to be polled for given lcore. */
@@ -151,9 +151,9 @@ struct l2fwd_port_statistics {
uint64_t collection_time = rte_get_timer_cycles();
/* Ask forwarding thread to give us stats. */
- __atomic_store_n(&qconf->stats_read_pending, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&qconf->lock);
- __atomic_store_n(&qconf->stats_read_pending, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);
/* Collect context statistics. */
stats_period = ctx->state_time - ctx->start_time;
@@ -522,8 +522,9 @@ struct l2fwd_port_statistics {
repeats++;
need_manage = qconf->flush_timer.expire < now;
/* Check if we was esked to give a stats. */
- stats_read_pending = __atomic_load_n(&qconf->stats_read_pending,
- __ATOMIC_RELAXED);
+ stats_read_pending = rte_atomic_load_explicit(
+ &qconf->stats_read_pending,
+ rte_memory_order_relaxed);
need_manage |= stats_read_pending;
for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index f54bb8b..ebfc2fe 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -157,12 +157,12 @@ struct client_rx_buf {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
uint32_t status = 0;
/* Only one core should display stats */
- if (__atomic_compare_exchange_n(&display_stats, &status, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &status, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/server_node_efd/efd_server/main.c b/examples/server_node_efd/efd_server/main.c
index fd72882..75ff0ea 100644
--- a/examples/server_node_efd/efd_server/main.c
+++ b/examples/server_node_efd/efd_server/main.c
@@ -177,12 +177,12 @@ struct efd_stats {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
/* Only one core should display stats */
uint32_t display_init = 0;
- if (__atomic_compare_exchange_n(&display_stats, &display_init, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &display_init, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned int sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3fc1b15..4391d88 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1052,10 +1052,10 @@ static unsigned check_ports_num(unsigned nb_ports)
}
if (enable_stats) {
- __atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_total_atomic, 1,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
src_vdev->stats.tx_total++;
src_vdev->stats.tx += ret;
}
@@ -1072,10 +1072,10 @@ static unsigned check_ports_num(unsigned nb_ports)
ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, nr_xmit,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1404,10 +1404,10 @@ static void virtio_tx_offload(struct rte_mbuf *m)
}
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, rx_count,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, enqueue_count,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1832,10 +1832,10 @@ uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
- __ATOMIC_SEQ_CST);
- rx = __atomic_load_n(&vdev->stats.rx_atomic,
- __ATOMIC_SEQ_CST);
+ rx_total = rte_atomic_load_explicit(&vdev->stats.rx_total_atomic,
+ rte_memory_order_seq_cst);
+ rx = rte_atomic_load_explicit(&vdev->stats.rx_atomic,
+ rte_memory_order_seq_cst);
rx_dropped = rx_total - rx;
printf("Statistics for device %d\n"
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 2fcb837..b163955 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -22,8 +22,8 @@
struct device_statistics {
uint64_t tx;
uint64_t tx_total;
- uint64_t rx_atomic;
- uint64_t rx_total_atomic;
+ RTE_ATOMIC(uint64_t) rx_atomic;
+ RTE_ATOMIC(uint64_t) rx_total_atomic;
};
struct vhost_queue {
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 514c8e0..55af6e7 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -198,7 +198,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
+ avail_idx = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire);
start_idx = queue->last_used_idx;
free_entries = avail_idx - start_idx;
count = RTE_MIN(count, free_entries);
@@ -231,7 +232,8 @@
rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
}
- __atomic_fetch_add(&vr->used->idx, count, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, count,
+ rte_memory_order_release);
queue->last_used_idx += count;
rte_vhost_vring_call(dev->vid, queue_id);
@@ -386,8 +388,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
- queue->last_avail_idx;
+ free_entries = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire) - queue->last_avail_idx;
if (free_entries == 0)
return 0;
@@ -442,7 +444,8 @@
queue->last_avail_idx += i;
queue->last_used_idx += i;
- __atomic_fetch_add(&vr->used->idx, i, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, i,
+ rte_memory_order_acq_rel);
rte_vhost_vring_call(dev->vid, queue_id);
diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
index 376f7b8..03f1ac9 100644
--- a/examples/vhost_blk/vhost_blk.c
+++ b/examples/vhost_blk/vhost_blk.c
@@ -85,9 +85,9 @@ struct vhost_blk_ctrlr *
*/
used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
used->idx++;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
vq->id, used->idx, task->req_idx);
@@ -111,12 +111,12 @@ struct vhost_blk_ctrlr *
desc->id = task->buffer_id;
desc->addr = 0;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
if (vq->used_wrap_counter)
desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
else
desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
task->inflight_idx);
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5fef268..d384c86 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -828,8 +828,9 @@ void channel_monitor_exit(void)
return -1;
uint32_t channel_connected = CHANNEL_MGR_CHANNEL_CONNECTED;
- if (__atomic_compare_exchange_n(&(chan_info->status), &channel_connected,
- CHANNEL_MGR_CHANNEL_PROCESSING, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_connected,
+ CHANNEL_MGR_CHANNEL_PROCESSING, rte_memory_order_relaxed,
+ rte_memory_order_relaxed) == 0)
return -1;
if (pkt->command == RTE_POWER_CPU_POWER) {
@@ -934,8 +935,8 @@ void channel_monitor_exit(void)
* from management thread
*/
uint32_t channel_processing = CHANNEL_MGR_CHANNEL_PROCESSING;
- __atomic_compare_exchange_n(&(chan_info->status), &channel_processing,
- CHANNEL_MGR_CHANNEL_CONNECTED, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_processing,
+ CHANNEL_MGR_CHANNEL_CONNECTED, rte_memory_order_relaxed, rte_memory_order_relaxed);
return 0;
}
--
1.8.3.1
* [PATCH v3 40/45] app/dumpcap: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (38 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 39/45] examples: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 41/45] app/test: " Tyler Retzlaff
` (5 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
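The quit flag below follows the usual signal-handler pattern: a relaxed store in the handler
and a relaxed load in the polling loop. A minimal sketch (hypothetical names, not dumpcap's
actual loop):

#include <stdbool.h>
#include <rte_common.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(bool) quit;	/* hypothetical quit flag */

static void
demo_signal_handler(int sig __rte_unused)
{
	rte_atomic_store_explicit(&quit, true, rte_memory_order_relaxed);
}

static void
demo_loop(void)
{
	while (!rte_atomic_load_explicit(&quit, rte_memory_order_relaxed)) {
		/* poll the ring, write packets, etc. */
	}
}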
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/dumpcap/main.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/app/dumpcap/main.c b/app/dumpcap/main.c
index cc0f66b..b25b95e 100644
--- a/app/dumpcap/main.c
+++ b/app/dumpcap/main.c
@@ -51,7 +51,7 @@
/* command line flags */
static const char *progname;
-static bool quit_signal;
+static RTE_ATOMIC(bool) quit_signal;
static bool group_read;
static bool quiet;
static bool use_pcapng = true;
@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv)
static void
signal_handler(int sig_num __rte_unused)
{
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
@@ -490,7 +490,7 @@ static void statistics_loop(void)
printf("%-15s %10s %10s\n",
"Interface", "Received", "Dropped");
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
RTE_ETH_FOREACH_DEV(p) {
if (rte_eth_dev_get_name_by_port(p, name) < 0)
continue;
@@ -528,7 +528,7 @@ static void statistics_loop(void)
static void
monitor_primary(void *arg __rte_unused)
{
- if (__atomic_load_n(&quit_signal, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed))
return;
if (rte_eal_primary_proc_alive(NULL)) {
@@ -536,7 +536,7 @@ static void statistics_loop(void)
} else {
fprintf(stderr,
"Primary process is no longer active, exiting...\n");
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
}
@@ -983,7 +983,7 @@ int main(int argc, char **argv)
show_count(0);
}
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
if (process_ring(out, r) < 0) {
fprintf(stderr, "pcapng file write failed; %s\n",
strerror(errno));
--
1.8.3.1
* [PATCH v3 41/45] app/test: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (39 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 40/45] app/dumpcap: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 42/45] app/test-eventdev: " Tyler Retzlaff
` (4 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test/test_bpf.c | 46 ++++++++-----
app/test/test_distributor.c | 114 ++++++++++++++++-----------------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++++----
app/test/test_hash_multiwriter.c | 16 ++---
app/test/test_hash_readwrite.c | 74 ++++++++++-----------
app/test/test_hash_readwrite_lf_perf.c | 88 ++++++++++++-------------
app/test/test_lcores.c | 25 ++++----
app/test/test_lpm_perf.c | 14 ++--
app/test/test_mcslock.c | 12 ++--
app/test/test_mempool_perf.c | 9 +--
app/test/test_pflock.c | 13 ++--
app/test/test_pmd_perf.c | 10 +--
app/test/test_rcu_qsbr_perf.c | 114 +++++++++++++++++----------------
app/test/test_ring_perf.c | 11 ++--
app/test/test_ring_stress_impl.h | 10 +--
app/test/test_rwlock.c | 9 +--
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +++----
app/test/test_spinlock.c | 9 +--
app/test/test_stack_perf.c | 12 ++--
app/test/test_threads.c | 33 +++++-----
app/test/test_ticketlock.c | 9 +--
app/test/test_timer.c | 31 +++++----
24 files changed, 378 insertions(+), 343 deletions(-)
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 53e3a31..2e43442 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -39,8 +39,8 @@
*/
struct dummy_offset {
- uint64_t u64;
- uint32_t u32;
+ RTE_ATOMIC(uint64_t) u64;
+ RTE_ATOMIC(uint32_t) u32;
uint16_t u16;
uint8_t u8;
};
@@ -1581,32 +1581,46 @@ struct bpf_test {
memset(&dfe, 0, sizeof(dfe));
rv = 1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = -1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = (int32_t)TEST_FILL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_3;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}
diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index d2037b7..df871e3 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -47,14 +47,14 @@ struct worker_params {
struct worker_params worker_params;
/* statics - all zero-initialized by default */
-static volatile int quit; /**< general quit variable for all threads */
-static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
-static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
-static volatile unsigned worker_idx;
-static volatile unsigned zero_idx;
+static volatile RTE_ATOMIC(int) quit; /**< general quit variable for all threads */
+static volatile RTE_ATOMIC(int) zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile RTE_ATOMIC(int) zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
+static volatile RTE_ATOMIC(unsigned int) zero_idx;
struct worker_stats {
- volatile unsigned handled_packets;
+ volatile RTE_ATOMIC(unsigned int) handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];
@@ -66,8 +66,8 @@ struct worker_stats {
{
unsigned i, count = 0;
for (i = 0; i < worker_idx; i++)
- count += __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED);
+ count += rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed);
return count;
}
@@ -77,8 +77,8 @@ struct worker_stats {
{
unsigned int i;
for (i = 0; i < RTE_MAX_LCORE; i++)
- __atomic_store_n(&worker_stats[i].handled_packets, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&worker_stats[i].handled_packets, 0,
+ rte_memory_order_relaxed);
}
/* this is the basic worker function for sanity test
@@ -91,17 +91,17 @@ struct worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -162,8 +162,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with all zero hashes done.\n");
/* pick two flows and check they go correctly */
@@ -189,9 +189,9 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(
+ rte_atomic_load_explicit(
&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_memory_order_relaxed));
printf("Sanity test with two hash values done\n");
}
@@ -218,8 +218,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with non-zero hashes done\n");
rte_mempool_put_bulk(p, (void *)bufs, BURST);
@@ -311,18 +311,18 @@ struct worker_stats {
struct rte_distributor *d = wp->dist;
unsigned int i;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
rte_pktmbuf_free(buf[i]);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(d, id, buf, num);
return 0;
}
@@ -381,51 +381,51 @@ struct worker_stats {
unsigned int num;
unsigned int zero_id = 0;
unsigned int zero_unset;
- const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
- __ATOMIC_RELAXED);
+ const unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
/* wait for quit single globally, or for worker zero, wait
* for zero_quit */
while (!quit && !(id == zero_id && zero_quit)) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
if (id == zero_id) {
rte_distributor_return_pkt(d, id, NULL, 0);
/* for worker zero, allow it to restart to pick up last packet
* when all workers are shutting down.
*/
- __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 1, rte_memory_order_release);
while (zero_quit)
usleep(100);
- __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 0, rte_memory_order_release);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets,
- num, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets,
+ num, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
}
@@ -491,17 +491,17 @@ struct worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST * 2) {
printf("Line %d: Error, not all packets flushed. "
@@ -560,18 +560,18 @@ struct worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
@@ -596,18 +596,18 @@ struct worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num, i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
*seq_field(buf[i]) += id + 1;
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -679,8 +679,8 @@ struct worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
/* Sort returned packets by sent order (sequence numbers). */
for (i = 0; i < buf_count; i++) {
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index ca86845..ba3cf26 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -31,7 +31,7 @@
/* static vars - zero initialized by default */
static volatile int quit;
-static volatile unsigned worker_idx;
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
struct worker_stats {
volatile unsigned handled_packets;
@@ -121,7 +121,7 @@ struct worker_stats {
struct rte_distributor *d = arg;
unsigned int num = 0;
int i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
struct rte_mbuf *buf[8] __rte_cache_aligned;
for (i = 0; i < 8; i++)
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 9296de2..bae39af 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -53,12 +53,13 @@
#define MAX_LCORES (rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))
-static uint32_t obj_count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) obj_count;
+static RTE_ATOMIC(uint32_t) synchro;
#define WAIT_SYNCHRO_FOR_WORKERS() do { \
if (lcore_self != rte_get_main_lcore()) \
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, \
+ rte_memory_order_relaxed); \
} while(0)
/*
@@ -71,7 +72,8 @@
WAIT_SYNCHRO_FOR_WORKERS();
- __atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silent the check in the caller */
+ /* silent the check in the caller */
+ rte_atomic_store_explicit(&obj_count, 1, rte_memory_order_relaxed);
if (rte_eal_init(0, NULL) != -1)
return -1;
@@ -113,7 +115,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
if (rp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -178,7 +180,7 @@
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -244,7 +246,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_hash_create(&hash_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple times simultaneously */
@@ -311,7 +313,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_fbk_hash_create(&fbk_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -376,7 +378,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
if (lpm != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -437,8 +439,8 @@ struct test_case test_cases[] = {
if (pt_case->func == NULL)
return -1;
- __atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&obj_count, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -448,7 +450,7 @@ struct test_case test_cases[] = {
rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
}
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
@@ -463,7 +465,7 @@ struct test_case test_cases[] = {
pt_case->clean(lcore_id);
}
- count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
+ count = rte_atomic_load_explicit(&obj_count, rte_memory_order_relaxed);
if (count != 1) {
printf("%s: common object allocated %d times (should be 1)\n",
pt_case->name, count);
diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
index ed9dd41..33d3147 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/app/test/test_hash_multiwriter.c
@@ -43,8 +43,8 @@ struct {
const uint32_t nb_total_tsx_insertion = 4.5*1024*1024;
uint32_t rounded_nb_total_tsx_insertion;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
static int use_htm;
@@ -84,8 +84,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)
tbl_multiwriter_test_params.keys[i]
@@ -166,8 +166,8 @@ struct {
tbl_multiwriter_test_params.found = found;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
/* Get list of enabled cores */
i = 0;
@@ -233,8 +233,8 @@ struct {
printf("No key corrupted during multiwriter insertion.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED)/
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed)/
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf(" cycles per insertion: %llu\n", cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 4997a01..1867376 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -45,14 +45,14 @@ struct {
struct rte_hash *h;
} tbl_rw_test_param;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
-static uint64_t gread_cycles;
-static uint64_t gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
-static uint64_t greads;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrites;
static int
test_hash_readwrite_worker(__rte_unused void *arg)
@@ -110,8 +110,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_rw_test_param.num_insert; i++)
tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
@@ -209,8 +209,8 @@ struct {
int worker_cnt = rte_lcore_count() - 1;
uint32_t tot_insert = 0;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
if (init_params(use_ext, use_htm, use_rw_lf, use_jhash) != 0)
goto err;
@@ -269,8 +269,8 @@ struct {
printf("No key corrupted during read-write test.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
@@ -310,8 +310,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, i, rte_memory_order_relaxed);
return 0;
}
@@ -344,9 +344,9 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rw_test_param.num_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rw_test_param.num_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -369,11 +369,11 @@ struct {
uint64_t start = 0, end = 0;
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
if (init_params(0, use_htm, 0, use_jhash) != 0)
goto err;
@@ -430,10 +430,10 @@ struct {
if (tot_worker_lcore < core_cnt[n] * 2)
goto finish;
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -475,8 +475,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_only[n] = cycles_per_insertion;
printf("Reader only: cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -484,17 +484,17 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->write_only[n] = cycles_per_insertion;
printf("Writer only: cycles per writes: %llu\n",
cycles_per_insertion);
}
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -569,8 +569,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_write_r[n] = cycles_per_insertion;
printf("Read-write cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -578,8 +578,8 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->read_write_w[n] = cycles_per_insertion;
printf("Read-write cycles per writes: %llu\n",
cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite_lf_perf.c b/app/test/test_hash_readwrite_lf_perf.c
index 5d18850..4523985 100644
--- a/app/test/test_hash_readwrite_lf_perf.c
+++ b/app/test/test_hash_readwrite_lf_perf.c
@@ -86,10 +86,10 @@ struct rwc_perf {
struct rte_hash *h;
} tbl_rwc_test_param;
-static uint64_t gread_cycles;
-static uint64_t greads;
-static uint64_t gwrite_cycles;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gwrites;
static volatile uint8_t writer_done;
@@ -651,8 +651,8 @@ struct rwc_perf {
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, read_cnt*loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, read_cnt*loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -724,8 +724,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -742,8 +742,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_hit[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -791,8 +791,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -811,8 +811,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_miss[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -861,8 +861,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -884,8 +884,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_nsp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -935,8 +935,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -958,8 +958,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_sp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1007,8 +1007,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1030,8 +1030,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
}
@@ -1087,9 +1087,9 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n",
rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0,
+ rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1127,10 +1127,10 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles,
- __ATOMIC_RELAXED) /
- __atomic_load_n(&greads,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles,
+ rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads,
+ rte_memory_order_relaxed);
rwc_perf_results->multi_rw[m][k][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n",
@@ -1178,8 +1178,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
write_type = WRITE_NO_KEY_SHIFT;
@@ -1210,8 +1210,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_extbkt[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1280,9 +1280,9 @@ struct rwc_perf {
tbl_rwc_test_param.keys_no_ks + i);
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rwc_test_param.single_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -1328,8 +1328,8 @@ struct rwc_perf {
rwc_core_cnt[n];
printf("\nNumber of writers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
rte_rcu_qsbr_init(rv, RTE_MAX_LCORE);
@@ -1364,8 +1364,8 @@ struct rwc_perf {
rte_eal_mp_wait_lcore();
unsigned long long cycles_per_write_operation =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
rwc_perf_results->writer_add_del[n]
= cycles_per_write_operation;
printf("Cycles per write operation: %llu\n",
diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 3434a0d..bd5c0dd 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -10,6 +10,7 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_thread.h>
+#include <rte_stdatomic.h>
#include "test.h"
@@ -25,7 +26,7 @@ struct thread_context {
enum { Thread_INIT, Thread_ERROR, Thread_DONE } state;
bool lcore_id_any;
rte_thread_t id;
- unsigned int *registered_count;
+ RTE_ATOMIC(unsigned int) *registered_count;
};
static uint32_t thread_loop(void *arg)
@@ -49,10 +50,10 @@ static uint32_t thread_loop(void *arg)
t->state = Thread_ERROR;
}
/* Report register happened to the control thread. */
- __atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(t->registered_count, 1, rte_memory_order_release);
/* Wait for release from the control thread. */
- while (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(t->registered_count, rte_memory_order_acquire) != 0)
sched_yield();
rte_thread_unregister();
lcore_id = rte_lcore_id();
@@ -73,7 +74,7 @@ static uint32_t thread_loop(void *arg)
{
struct thread_context thread_contexts[RTE_MAX_LCORE];
unsigned int non_eal_threads_count;
- unsigned int registered_count;
+ RTE_ATOMIC(unsigned int) registered_count;
struct thread_context *t;
unsigned int i;
int ret;
@@ -93,7 +94,7 @@ static uint32_t thread_loop(void *arg)
}
printf("non-EAL threads count: %u\n", non_eal_threads_count);
/* Wait all non-EAL threads to register. */
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
@@ -109,14 +110,14 @@ static uint32_t thread_loop(void *arg)
if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
non_eal_threads_count++;
printf("non-EAL threads count: %u\n", non_eal_threads_count);
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
}
skip_lcore_any:
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -225,7 +226,7 @@ struct limit_lcore_context {
struct thread_context thread_contexts[2];
unsigned int non_eal_threads_count = 0;
struct limit_lcore_context l[2] = {};
- unsigned int registered_count = 0;
+ RTE_ATOMIC(unsigned int) registered_count = 0;
struct thread_context *t;
void *handle[2] = {};
unsigned int i;
@@ -275,7 +276,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 1 ||
@@ -298,7 +299,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 2 ||
@@ -315,7 +316,7 @@ struct limit_lcore_context {
}
rte_lcore_dump(stdout);
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -337,7 +338,7 @@ struct limit_lcore_context {
cleanup_threads:
/* Release all threads */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
rte_thread_join(t->id, NULL);
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
index 82daf9e..bc4bdde 100644
--- a/app/test/test_lpm_perf.c
+++ b/app/test/test_lpm_perf.c
@@ -22,8 +22,8 @@
struct rte_lpm *lpm;
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
-static volatile uint32_t thr_id;
-static uint64_t gwrite_cycles;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
static uint32_t num_writers;
/* LPM APIs are not thread safe, use spinlock */
@@ -362,7 +362,7 @@ static void generate_large_route_rule_table(void)
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -470,7 +470,7 @@ static void generate_large_route_rule_table(void)
total_cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, total_cycles, rte_memory_order_relaxed);
return 0;
@@ -540,9 +540,9 @@ static void generate_large_route_rule_table(void)
reader_f = test_lpm_reader;
writer_done = 0;
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
/* Launch reader threads */
for (i = j; i < num_cores; i++)
@@ -563,7 +563,7 @@ static void generate_large_route_rule_table(void)
printf("Total LPM Adds: %d\n", TOTAL_WRITES);
printf("Total LPM Deletes: %d\n", TOTAL_WRITES);
printf("Average LPM Add/Del: %"PRIu64" cycles\n",
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed)
/ TOTAL_WRITES);
writer_done = 1;
diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index 46ff13c..8fcbc11 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -42,7 +42,7 @@
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_mcslock_per_core(__rte_unused void *arg)
@@ -75,7 +75,7 @@
rte_mcslock_t ml_perf_me;
/* wait synchro */
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -100,14 +100,14 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lock = 1;
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
@@ -116,11 +116,11 @@
printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index 96de347..35f0597 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -88,7 +88,7 @@
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
@@ -188,7 +188,8 @@ struct mempool_test_stats {
/* wait synchro for workers */
if (lcore_id != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
start_cycles = rte_get_timer_cycles();
@@ -233,7 +234,7 @@ struct mempool_test_stats {
int ret;
unsigned cores_save = cores;
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
/* reset stats */
memset(stats, 0, sizeof(stats));
@@ -258,7 +259,7 @@ struct mempool_test_stats {
}
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
ret = per_lcore_mempool_test(mp);
diff --git a/app/test/test_pflock.c b/app/test/test_pflock.c
index 5f77b15..d989a68 100644
--- a/app/test/test_pflock.c
+++ b/app/test/test_pflock.c
@@ -31,7 +31,7 @@
static rte_pflock_t sl;
static rte_pflock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_pflock_per_core(__rte_unused void *arg)
@@ -69,7 +69,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -99,7 +100,7 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -107,7 +108,7 @@
printf("\nTest with phase-fair lock on single core...\n");
lock = 1;
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -116,12 +117,12 @@
printf("\nPhase-fair test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index f6d97f2..46ae80d 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -537,7 +537,7 @@ enum {
return 0;
}
-static uint64_t start;
+static RTE_ATOMIC(uint64_t) start;
static inline int
poll_burst(void *args)
@@ -575,7 +575,7 @@ enum {
num[portid] = pkt_per_port;
}
- rte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_64((uint64_t *)(uintptr_t)&start, 1, rte_memory_order_acquire);
cur_tsc = rte_rdtsc();
while (total) {
@@ -629,9 +629,9 @@ enum {
/* only when polling first */
if (flags == SC_BURST_POLL_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_relaxed);
else
- __atomic_store_n(&start, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 0, rte_memory_order_relaxed);
/* start polling thread
* if in POLL_FIRST mode, poll once launched;
@@ -655,7 +655,7 @@ enum {
/* only when polling second */
if (flags == SC_BURST_XMIT_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_release);
/* wait for polling finished */
diff_tsc = rte_eal_wait_lcore(lcore);
diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index ce88a73..d1bf5c5 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -25,13 +25,15 @@
static uint32_t *hash_data[TOTAL_ENTRY];
static volatile uint8_t writer_done;
static volatile uint8_t all_registered;
-static volatile uint32_t thr_id;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
static struct rte_hash *h;
static char hash_name[8];
-static uint64_t updates, checks;
-static uint64_t update_cycles, check_cycles;
+static RTE_ATOMIC(uint64_t) updates;
+static RTE_ATOMIC(uint64_t) checks;
+static RTE_ATOMIC(uint64_t) update_cycles;
+static RTE_ATOMIC(uint64_t) check_cycles;
/* Scale down results to 1000 operations to support lower
* granularity clocks.
@@ -44,7 +46,7 @@
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -81,8 +83,8 @@
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
/* Make the thread offline */
rte_rcu_qsbr_thread_offline(t[0], thread_id);
@@ -113,8 +115,8 @@
} while (loop_cnt < 20000000);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -130,15 +132,15 @@
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
printf("\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\n",
num_cores - 1);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
if (all_registered == 1)
tmp_num_cores = num_cores - 1;
@@ -168,15 +170,16 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -193,10 +196,10 @@
size_t sz;
unsigned int i, tmp_num_cores;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf Test: %d Readers\n", num_cores);
@@ -220,11 +223,11 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -241,10 +244,10 @@
size_t sz;
unsigned int i;
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: %d Writers ('wait' in qsbr_check == false)\n",
num_cores);
@@ -266,10 +269,11 @@
/* Wait until all readers have exited */
rte_eal_mp_wait_lcore();
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -317,8 +321,8 @@
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
rte_rcu_qsbr_thread_unregister(temp, thread_id);
@@ -389,12 +393,12 @@ static struct rte_hash *init_hash(void)
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\n", num_cores);
@@ -453,8 +457,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
@@ -467,12 +471,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -511,7 +515,7 @@ static struct rte_hash *init_hash(void)
printf("Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\n", num_cores);
- __atomic_store_n(&thr_id, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_relaxed);
if (all_registered == 1)
tmp_num_cores = num_cores;
@@ -570,8 +574,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
/* Wait and check return value from reader threads */
@@ -583,12 +587,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -622,10 +626,10 @@ static struct rte_hash *init_hash(void)
return TEST_SKIPPED;
}
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
num_cores = 0;
RTE_LCORE_FOREACH_WORKER(core_id) {
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index d7c5a4c..6d7a0a8 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -186,7 +186,7 @@ struct thread_params {
void *burst = NULL;
#ifdef RTE_USE_C11_MEM_MODEL
- if (__atomic_fetch_add(&lcore_count, 1, __ATOMIC_RELAXED) + 1 != 2)
+ if (rte_atomic_fetch_add_explicit(&lcore_count, 1, rte_memory_order_relaxed) + 1 != 2)
#else
if (__sync_add_and_fetch(&lcore_count, 1) != 2)
#endif
@@ -320,7 +320,7 @@ struct thread_params {
return 0;
}
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static uint64_t queue_count[RTE_MAX_LCORE];
#define TIME_MS 100
@@ -342,7 +342,8 @@ struct thread_params {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (time_diff < hz * TIME_MS / 1000) {
@@ -397,12 +398,12 @@ struct thread_params {
param.r = r;
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lcore_f(&param);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_ring_stress_impl.h b/app/test/test_ring_stress_impl.h
index 2dec897..e6b23c0 100644
--- a/app/test/test_ring_stress_impl.h
+++ b/app/test/test_ring_stress_impl.h
@@ -24,7 +24,7 @@ enum {
WRK_CMD_RUN,
};
-static uint32_t wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;
+static RTE_ATOMIC(uint32_t) wrk_cmd __rte_cache_aligned = WRK_CMD_STOP;
/* test run-time in seconds */
static const uint32_t run_time = 60;
@@ -203,7 +203,7 @@ struct ring_elem {
* really releasing any data through 'wrk_cmd' to
* the worker.
*/
- while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) != WRK_CMD_RUN)
+ while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) != WRK_CMD_RUN)
rte_pause();
cl = rte_rdtsc_precise();
@@ -246,7 +246,7 @@ struct ring_elem {
lcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);
- } while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) == WRK_CMD_RUN);
+ } while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) == WRK_CMD_RUN);
cl = rte_rdtsc_precise() - cl;
if (prcs == 0)
@@ -360,12 +360,12 @@ struct ring_elem {
}
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_RUN, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_RUN, rte_memory_order_release);
rte_delay_us(run_time * US_PER_S);
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_STOP, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_STOP, rte_memory_order_release);
/* wait for workers and collect stats. */
mc = rte_lcore_id();
diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index 5079895..f67fc35 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -35,7 +35,7 @@
static rte_rwlock_t sl;
static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
enum {
LC_TYPE_RDLOCK,
@@ -101,7 +101,8 @@ struct try_rwlock_lcore {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -134,12 +135,12 @@ struct try_rwlock_lcore {
printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(NULL);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c
index 873bd60..7455bac 100644
--- a/app/test/test_seqlock.c
+++ b/app/test/test_seqlock.c
@@ -22,7 +22,7 @@ struct data {
struct reader {
struct data *data;
- uint8_t stop;
+ RTE_ATOMIC(uint8_t) stop;
};
#define WRITER_RUNTIME 2.0 /* s */
@@ -79,7 +79,7 @@ struct reader {
struct reader *r = arg;
int rc = TEST_SUCCESS;
- while (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&
+ while (rte_atomic_load_explicit(&r->stop, rte_memory_order_relaxed) == 0 &&
rc == TEST_SUCCESS) {
struct data *data = r->data;
bool interrupted;
@@ -115,7 +115,7 @@ struct reader {
static void
reader_stop(struct reader *reader)
{
- __atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&reader->stop, 1, rte_memory_order_relaxed);
}
#define NUM_WRITERS 2 /* main lcore + one worker */
diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index c12d52d..010ab82 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -59,15 +59,15 @@ static int32_t dummy_mt_unsafe_cb(void *args)
* test, because two threads are concurrently in a non-MT safe callback.
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so clear pass flag */
*pass_test = 0;
@@ -86,15 +86,15 @@ static int32_t dummy_mt_safe_cb(void *args)
* that 2 threads are running the callback at the same time: MT safe
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so set pass flag */
*pass_test = 1;
@@ -748,15 +748,15 @@ static int32_t dummy_mt_safe_cb(void *args)
/* retrieve done flag and lock to add/sub */
uint32_t *done = &params[0];
- uint32_t *lock = &params[1];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&params[1];
while (!*done) {
- __atomic_fetch_add(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(lock, 1, rte_memory_order_relaxed);
rte_delay_us(500);
- if (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)
+ if (rte_atomic_load_explicit(lock, rte_memory_order_relaxed) > 1)
/* pass: second core has simultaneously incremented */
*done = 1;
- __atomic_fetch_sub(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(lock, 1, rte_memory_order_relaxed);
}
return 0;
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 9a481f2..a29405a 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -48,7 +48,7 @@
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_spinlock_per_core(__rte_unused void *arg)
@@ -110,7 +110,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -149,11 +150,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c
index c5e1caa..3f17a26 100644
--- a/app/test/test_stack_perf.c
+++ b/app/test/test_stack_perf.c
@@ -23,7 +23,7 @@
*/
static volatile unsigned int bulk_sizes[] = {8, MAX_BURST};
-static uint32_t lcore_barrier;
+static RTE_ATOMIC(uint32_t) lcore_barrier;
struct lcore_pair {
unsigned int c1;
@@ -143,8 +143,8 @@ struct thread_args {
s = args->s;
size = args->sz;
- __atomic_fetch_sub(&lcore_barrier, 1, __ATOMIC_RELAXED);
- rte_wait_until_equal_32(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&lcore_barrier, 1, rte_memory_order_relaxed);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&lcore_barrier, 0, rte_memory_order_relaxed);
uint64_t start = rte_rdtsc();
@@ -173,7 +173,7 @@ struct thread_args {
unsigned int i;
for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
- __atomic_store_n(&lcore_barrier, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 2, rte_memory_order_relaxed);
args[0].sz = args[1].sz = bulk_sizes[i];
args[0].s = args[1].s = s;
@@ -206,7 +206,7 @@ struct thread_args {
int cnt = 0;
double avg;
- __atomic_store_n(&lcore_barrier, n, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, n, rte_memory_order_relaxed);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
@@ -300,7 +300,7 @@ struct thread_args {
struct lcore_pair cores;
struct rte_stack *s;
- __atomic_store_n(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 0, rte_memory_order_relaxed);
s = rte_stack_create(STACK_NAME, STACK_SIZE, rte_socket_id(), flags);
if (s == NULL) {
diff --git a/app/test/test_threads.c b/app/test/test_threads.c
index 4ac3f26..6d6881a 100644
--- a/app/test/test_threads.c
+++ b/app/test/test_threads.c
@@ -6,12 +6,13 @@
#include <rte_thread.h>
#include <rte_debug.h>
+#include <rte_stdatomic.h>
#include "test.h"
RTE_LOG_REGISTER(threads_logtype_test, test.threads, INFO);
-static uint32_t thread_id_ready;
+static RTE_ATOMIC(uint32_t) thread_id_ready;
static uint32_t
thread_main(void *arg)
@@ -19,9 +20,9 @@
if (arg != NULL)
*(rte_thread_t *)arg = rte_thread_self();
- __atomic_store_n(&thread_id_ready, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 1, rte_memory_order_release);
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 1)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 1)
;
return 0;
@@ -37,13 +38,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
@@ -61,13 +62,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main,
&thread_main_id) == 0, "Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_detach(thread_id) == 0,
"Failed to detach thread.");
@@ -85,7 +86,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
priority = RTE_THREAD_PRIORITY_NORMAL;
@@ -121,7 +122,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Priority set mismatches priority get");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -137,7 +138,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset0) == 0,
@@ -190,7 +191,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes affinity thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset1) == 0,
@@ -198,7 +199,7 @@
RTE_TEST_ASSERT(memcmp(&cpuset0, &cpuset1, sizeof(rte_cpuset_t)) == 0,
"Failed to apply affinity attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -219,7 +220,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes priority thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_priority(thread_id, &priority) == 0,
@@ -227,7 +228,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Failed to apply priority attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -243,13 +244,13 @@
thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 1fbbedb..9b6b584 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -48,7 +48,7 @@
static rte_ticketlock_recursive_t tlr;
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_ticketlock_per_core(__rte_unused void *arg)
@@ -111,7 +111,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcore_count[lcore] < MAX_LOOP) {
@@ -153,11 +154,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index cac8fc0..dc15a80 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -202,7 +202,7 @@ struct mytimerinfo {
/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
-static uint16_t lcore_state[RTE_MAX_LCORE];
+static RTE_ATOMIC(uint16_t) lcore_state[RTE_MAX_LCORE];
static void
main_init_workers(void)
@@ -210,7 +210,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_WAITING, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_WAITING,
+ rte_memory_order_relaxed);
}
}
@@ -220,10 +221,12 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_RUN_SIGNAL, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL,
+ rte_memory_order_release);
}
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_RUNNING,
+ rte_memory_order_acquire);
}
}
@@ -233,7 +236,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_FINISHED,
+ rte_memory_order_acquire);
}
}
@@ -242,8 +246,10 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- rte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, __ATOMIC_ACQUIRE);
- __atomic_store_n(&lcore_state[lcore_id], WORKER_RUNNING, __ATOMIC_RELEASE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[lcore_id], WORKER_RUN_SIGNAL,
+ rte_memory_order_acquire);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING,
+ rte_memory_order_release);
}
static void
@@ -251,7 +257,8 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- __atomic_store_n(&lcore_state[lcore_id], WORKER_FINISHED, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED,
+ rte_memory_order_release);
}
@@ -277,12 +284,12 @@ struct mytimerinfo {
unsigned int lcore_id = rte_lcore_id();
unsigned int main_lcore = rte_get_main_lcore();
int32_t my_collisions = 0;
- static uint32_t collisions;
+ static RTE_ATOMIC(uint32_t) collisions;
if (lcore_id == main_lcore) {
cb_count = 0;
test_failed = 0;
- __atomic_store_n(&collisions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&collisions, 0, rte_memory_order_relaxed);
timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
if (timers == NULL) {
printf("Test Failed\n");
@@ -310,7 +317,7 @@ struct mytimerinfo {
my_collisions++;
}
if (my_collisions != 0)
- __atomic_fetch_add(&collisions, my_collisions, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&collisions, my_collisions, rte_memory_order_relaxed);
/* wait long enough for timers to expire */
rte_delay_ms(100);
@@ -324,7 +331,7 @@ struct mytimerinfo {
/* now check that we get the right number of callbacks */
if (lcore_id == main_lcore) {
- my_collisions = __atomic_load_n(&collisions, __ATOMIC_RELAXED);
+ my_collisions = rte_atomic_load_explicit(&collisions, rte_memory_order_relaxed);
if (my_collisions != 0)
printf("- %d timer reset collisions (OK)\n", my_collisions);
rte_timer_manage();
--
1.8.3.1
* [PATCH v3 42/45] app/test-eventdev: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (40 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 41/45] app/test: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 43/45] app/test-crypto-perf: " Tyler Retzlaff
` (3 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
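The conversion is mechanical; the one detail worth noting is that the
outstanding-packet counter in test_order gains an RTE_ATOMIC() annotation
and is then read and updated through the explicit wrappers. A simplified
sketch of the hunks below (the counter is a struct member in the real code,
shown here as a static for brevity):

  #include <stdint.h>
  #include <rte_stdatomic.h>

  static RTE_ATOMIC(uint64_t) outstand_pkts;

  /* worker: one packet retired */
  static inline void
  order_pkt_done(void)
  {
          rte_atomic_fetch_sub_explicit(&outstand_pkts, 1,
                          rte_memory_order_relaxed);
  }

  /* main lcore: poll until all packets are accounted for */
  static inline int
  order_done(void)
  {
          int64_t remaining = (int64_t)rte_atomic_load_explicit(&outstand_pkts,
                          rte_memory_order_relaxed);
          return remaining <= 0;
  }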
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-eventdev/test_order_atq.c | 4 ++--
app/test-eventdev/test_order_common.c | 5 +++--
app/test-eventdev/test_order_common.h | 8 ++++----
app/test-eventdev/test_order_queue.c | 4 ++--
app/test-eventdev/test_perf_common.h | 6 +++---
5 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 2fee4b4..128d3f2 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index a9894c6..0fceace 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -189,7 +189,7 @@
evt_err("failed to allocate t->expected_flow_seq memory");
goto exp_nomem;
}
- __atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts, rte_memory_order_relaxed);
t->err = false;
t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
@@ -296,7 +296,8 @@
while (t->err == false) {
uint64_t new_cycles = rte_get_timer_cycles();
- int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
+ int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
+ rte_memory_order_relaxed);
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 1507265..65878d1 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -48,7 +48,7 @@ struct test_order {
* The atomic_* is an expensive operation,Since it is a functional test,
* We are using the atomic_ operation to reduce the code complexity.
*/
- uint64_t outstand_pkts;
+ RTE_ATOMIC(uint64_t) outstand_pkts;
enum evt_test_result result;
uint32_t nb_flows;
uint64_t nb_pkts;
@@ -95,7 +95,7 @@ struct test_order {
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
- uint64_t *const outstand_pkts)
+ RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
/* compare the seqn against expected value */
@@ -113,7 +113,7 @@ struct test_order {
*/
expected_flow_seq[flow]++;
rte_pktmbuf_free(ev->mbuf);
- __atomic_fetch_sub(outstand_pkts, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -132,7 +132,7 @@ struct test_order {
const uint8_t port = w->port_id;\
const uint32_t nb_flows = t->nb_flows;\
uint32_t *expected_flow_seq = t->expected_flow_seq;\
- uint64_t *outstand_pkts = &t->outstand_pkts;\
+ RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
if (opt->verbose_level > 1)\
printf("%s(): lcore %d dev_id %d port=%d\n",\
__func__, rte_lcore_id(), dev_id, port)
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 80eaea5..a282ab2 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 2b4f572..7f7c823 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -225,7 +225,7 @@ struct perf_elt {
* stored before updating the number of
* processed packets for worker lcores
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -270,7 +270,7 @@ struct perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -325,7 +325,7 @@ struct perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts += vec->nb_elem;
if (enable_fwd_latency) {
--
1.8.3.1
* [PATCH v3 43/45] app/test-crypto-perf: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (41 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 42/45] app/test-eventdev: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 44/45] app/test-compress-perf: " Tyler Retzlaff
` (2 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
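Every hunk here is the same print-the-header-once pattern: the first lcore
to win a relaxed compare-exchange on a static flag prints the table/CSV
header. A minimal sketch of that pattern (header string shortened for
illustration):

  #include <stdio.h>
  #include <rte_stdatomic.h>

  static RTE_ATOMIC(uint16_t) display_once;

  static void
  maybe_print_header(void)
  {
          uint16_t exp = 0;

          /* only the lcore that flips 0 -> 1 prints the header */
          if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
                          rte_memory_order_relaxed, rte_memory_order_relaxed))
                  printf("# lcore id, Buf Size, Burst Size, ...\n");
  }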
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-crypto-perf/cperf_test_latency.c | 6 +++---
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +++++-----
app/test-crypto-perf/cperf_test_throughput.c | 10 +++++-----
app/test-crypto-perf/cperf_test_verify.c | 10 +++++-----
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 99b7d7c..b8ad6bf 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -136,7 +136,7 @@ struct priv_op_data {
uint32_t imix_idx = 0;
int ret = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
if (ctx == NULL)
return 0;
@@ -341,8 +341,8 @@ struct priv_op_data {
uint16_t exp = 0;
if (ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
"cycles, time (us)");
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 4a60f6d..7191d99 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -396,7 +396,7 @@ struct pmd_cyclecount_state {
state.lcore = rte_lcore_id();
state.linearize = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
static bool warmup = true;
/*
@@ -443,8 +443,8 @@ struct pmd_cyclecount_state {
uint16_t exp = 0;
if (!opts->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
@@ -460,8 +460,8 @@ struct pmd_cyclecount_state {
state.cycles_per_enq,
state.cycles_per_deq);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index e3d266d..c0891e7 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -107,7 +107,7 @@ struct cperf_throughput_ctx {
uint8_t burst_size_idx = 0;
uint32_t imix_idx = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
@@ -277,8 +277,8 @@ struct cperf_throughput_ctx {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst Size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -298,8 +298,8 @@ struct cperf_throughput_ctx {
throughput_gbps,
cycles_per_packet);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 3548509..222c7a1 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -216,7 +216,7 @@ struct cperf_op_result {
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
uint64_t ops_failed = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint64_t i;
uint16_t ops_unused = 0;
@@ -370,8 +370,8 @@ struct cperf_op_result {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -388,8 +388,8 @@ struct cperf_op_result {
ops_deqd_failed,
ops_failed);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore id, Buffer Size(B), "
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Failed Ops\n");
--
1.8.3.1
* [PATCH v3 44/45] app/test-compress-perf: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (42 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 43/45] app/test-crypto-perf: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-27 22:37 ` [PATCH v3 45/45] app/test-bbdev: " Tyler Retzlaff
2024-03-29 2:07 ` [PATCH v3 00/45] use " Tyler Retzlaff
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 ++--
app/test-compress-perf/comp_perf_test_throughput.c | 10 +++++-----
app/test-compress-perf/comp_perf_test_verify.c | 6 +++---
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/app/test-compress-perf/comp_perf_test_common.h b/app/test-compress-perf/comp_perf_test_common.h
index d039e5a..085e269 100644
--- a/app/test-compress-perf/comp_perf_test_common.h
+++ b/app/test-compress-perf/comp_perf_test_common.h
@@ -14,7 +14,7 @@ struct cperf_mem_resources {
uint16_t qp_id;
uint8_t lcore_id;
- uint16_t print_info_once;
+ RTE_ATOMIC(uint16_t) print_info_once;
uint32_t total_bufs;
uint8_t *compressed_data;
diff --git a/app/test-compress-perf/comp_perf_test_cyclecount.c b/app/test-compress-perf/comp_perf_test_cyclecount.c
index 4d336ec..64e8faa 100644
--- a/app/test-compress-perf/comp_perf_test_cyclecount.c
+++ b/app/test-compress-perf/comp_perf_test_cyclecount.c
@@ -498,8 +498,8 @@ struct cperf_cyclecount_ctx {
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
diff --git a/app/test-compress-perf/comp_perf_test_throughput.c b/app/test-compress-perf/comp_perf_test_throughput.c
index 1f7072d..089d19c 100644
--- a/app/test-compress-perf/comp_perf_test_throughput.c
+++ b/app/test-compress-perf/comp_perf_test_throughput.c
@@ -336,7 +336,7 @@
struct cperf_benchmark_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->ver.options;
uint32_t lcore = rte_lcore_id();
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
int i, ret = EXIT_SUCCESS;
ctx->ver.mem.lcore_id = lcore;
@@ -345,8 +345,8 @@
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
@@ -413,8 +413,8 @@
}
exp = 0;
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("\n%12s%6s%12s%17s%15s%16s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]",
"Comp [Gbps]", "Decomp [Gbps]");
diff --git a/app/test-compress-perf/comp_perf_test_verify.c b/app/test-compress-perf/comp_perf_test_verify.c
index 7bd1807..09d97c5 100644
--- a/app/test-compress-perf/comp_perf_test_verify.c
+++ b/app/test-compress-perf/comp_perf_test_verify.c
@@ -396,7 +396,7 @@
struct cperf_verify_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->options;
int ret = EXIT_SUCCESS;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint32_t lcore = rte_lcore_id();
uint16_t exp = 0;
@@ -452,8 +452,8 @@
test_data->input_data_sz * 100;
if (!ctx->silent) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("%12s%6s%12s%17s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]");
}
--
1.8.3.1
* [PATCH v3 45/45] app/test-bbdev: use rte stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (43 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 44/45] app/test-compress-perf: " Tyler Retzlaff
@ 2024-03-27 22:37 ` Tyler Retzlaff
2024-03-29 2:07 ` [PATCH v3 00/45] use " Tyler Retzlaff
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-27 22:37 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
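Beyond the straight load/store conversions, the detail worth calling out is
that rte_wait_until_equal_16() still takes a non-atomic uint16_t pointer, so
the now-RTE_ATOMIC() sync fields are cast back through uintptr_t at the wait
sites. Roughly (a sketch: the variable name and SYNC_* values here are
illustrative, the real field is tp->op_params->sync):

  #include <stdint.h>
  #include <rte_stdatomic.h>
  #include <rte_pause.h>

  #define SYNC_WAIT  0
  #define SYNC_START 1

  static RTE_ATOMIC(uint16_t) sync_flag;

  /* main lcore: release the workers */
  static void
  start_workers(void)
  {
          rte_atomic_store_explicit(&sync_flag, SYNC_START,
                          rte_memory_order_relaxed);
  }

  /* worker lcore: spin until told to start */
  static void
  wait_for_start(void)
  {
          rte_wait_until_equal_16((uint16_t *)(uintptr_t)&sync_flag, SYNC_START,
                          rte_memory_order_relaxed);
  }

The cast is only there to strip the atomic qualifier for the wait helper's
existing prototype; the store/load semantics are unchanged.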
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++++++++++++----------------
1 file changed, 110 insertions(+), 73 deletions(-)
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index dcce00a..9694ed3 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -144,7 +144,7 @@ struct test_op_params {
uint16_t num_to_process;
uint16_t num_lcores;
int vector_mask;
- uint16_t sync;
+ RTE_ATOMIC(uint16_t) sync;
struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
@@ -159,9 +159,9 @@ struct thread_params {
uint8_t iter_count;
double iter_average;
double bler;
- uint16_t nb_dequeued;
- int16_t processing_status;
- uint16_t burst_sz;
+ RTE_ATOMIC(uint16_t) nb_dequeued;
+ RTE_ATOMIC(int16_t) processing_status;
+ RTE_ATOMIC(uint16_t) burst_sz;
struct test_op_params *op_params;
struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
@@ -3195,56 +3195,64 @@ typedef int (test_case_function)(struct active_device *ad,
}
if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
printf(
"Dequeue interrupt handler called for incorrect event!\n");
return;
}
- burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);
+ burst_sz = rte_atomic_load_explicit(&tp->burst_sz, rte_memory_order_relaxed);
num_ops = tp->op_params->num_to_process;
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
deq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,
&tp->fft_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
deq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
&tp->mldts_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else /*RTE_BBDEV_OP_TURBO_ENC*/
deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
if (deq < burst_sz) {
printf(
"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
burst_sz, deq);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
- if (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) + deq < num_ops) {
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
return;
}
@@ -3288,7 +3296,8 @@ typedef int (test_case_function)(struct active_device *ad,
if (ret) {
printf("Buffers validation failed\n");
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
}
switch (test_vector.op_type) {
@@ -3315,7 +3324,8 @@ typedef int (test_case_function)(struct active_device *ad,
break;
default:
printf("Unknown op type: %d\n", test_vector.op_type);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
@@ -3324,7 +3334,7 @@ typedef int (test_case_function)(struct active_device *ad,
tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
((double)total_time / (double)rte_get_tsc_hz());
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
}
static int
@@ -3362,10 +3372,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3415,15 +3426,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3459,10 +3472,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3506,15 +3520,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3549,10 +3565,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3592,15 +3609,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3636,10 +3655,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3681,15 +3701,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3725,10 +3747,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3769,15 +3792,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3811,10 +3836,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_to_process);
@@ -3851,15 +3877,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3894,7 +3922,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4013,7 +4042,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4148,7 +4178,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4271,7 +4302,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4402,7 +4434,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4503,7 +4536,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4604,7 +4638,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4702,7 +4737,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4898,7 +4934,7 @@ typedef int (test_case_function)(struct active_device *ad,
else
return TEST_SKIPPED;
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -4921,7 +4957,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = bler_function(&t_params[0]);
/* Main core is always used */
@@ -5024,7 +5060,7 @@ typedef int (test_case_function)(struct active_device *ad,
throughput_function = throughput_pmd_lcore_enc;
}
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -5047,7 +5083,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = throughput_function(&t_params[0]);
/* Main core is always used */
@@ -5077,29 +5113,30 @@ typedef int (test_case_function)(struct active_device *ad,
* Wait for main lcore operations.
*/
tp = &t_params[0];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+ (rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed) !=
TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed);
/* Wait for worker lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp = &t_params[used_cores];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
- TEST_FAILED))
+ (rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed) != TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed);
}
/* Print throughput if test passed */
--
1.8.3.1
* Re: [PATCH v3 00/45] use stdatomic API
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
` (44 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 45/45] app/test-bbdev: " Tyler Retzlaff
@ 2024-03-29 2:07 ` Tyler Retzlaff
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-03-29 2:07 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Ziyang Xuan
Recheck-request: iol-unit-amd64-testing
* [PATCH v4 00/45] use stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (48 preceding siblings ...)
2024-03-27 22:37 ` [PATCH v3 00/45] use " Tyler Retzlaff
@ 2024-04-19 23:05 ` Tyler Retzlaff
2024-04-19 23:05 ` [PATCH v4 01/45] net/mlx5: use rte " Tyler Retzlaff
` (44 more replies)
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
51 siblings, 45 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:05 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
This series converts all non-generic builtin atomics to use the rte_atomic
macros that allow optional enablement of standard C11 atomics.
Use of generic atomics for non-scalar types is not converted in this
change and will be evaluated as part of a separate series.
Note: if this series ends up requiring too much rebasing due to tree
churn before it is merged, I will break it up into smaller series.
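For reviewers who have not looked at the rte stdatomic macros yet, the
mechanical pattern applied throughout the series is roughly the following
(a minimal sketch; the variable name is illustrative and not taken from any
one driver):

  #include <rte_stdatomic.h>

  /* before: gcc builtin operating on a plain scalar */
  static uint32_t refcnt_old;

  static inline uint32_t
  take_ref_old(void)
  {
          return __atomic_fetch_add(&refcnt_old, 1, __ATOMIC_RELAXED);
  }

  /* after: RTE_ATOMIC() type annotation plus the explicit wrapper */
  static RTE_ATOMIC(uint32_t) refcnt_new;

  static inline uint32_t
  take_ref_new(void)
  {
          return rte_atomic_fetch_add_explicit(&refcnt_new, 1,
                          rte_memory_order_relaxed);
  }

When the optional stdatomic mode is enabled, RTE_ATOMIC() expands to a C11
_Atomic specifier and the wrappers map to the standard atomic_*_explicit()
operations; otherwise they fall back to the same gcc builtins being
replaced, so the conversion should be behaviour-neutral.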
v4:
* rebase after merge of move alignment attribute on types for MSVC,
no other changes.
v3:
* event/dsw, wrap all lines <= 80 chars, align arguments to
opening parenthesis.
* event/dlb2, wrap changed lines <= 80 chars, remove comments
referencing gcc __atomic built-ins.
* bus/vmbus, remove comment referencing gcc atomic built-ins,
fix mistake where monitor_mask was declared RTE_ATOMIC(uint32_t),
fix mistake where pending was not declared RTE_ATOMIC(uint32_t),
remove now unnecessary cast to __rte_atomic of pending (since
the field is now properly declare RTE_ATOMIC).
v2:
* drop the net/sfc driver from the series. the sfc driver
uses generic __atomic_store not handled by the current macros.
the cases where generic __atomic_xxx are used on objects that
can't be accepted by __atomic_xxx_n will be addressed in a
separate series.
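For clarity, the distinction drawn above is between the scalar _n builtins,
which the rte_atomic_*_explicit macros wrap, and the generic object forms,
which they do not. A hedged sketch (the struct and variable names are made
up for illustration, not taken from the sfc driver):

  #include <stdint.h>

  /* scalar form: convertible to rte_atomic_store_explicit() */
  static uint64_t pkt_count;

  /* generic form: stores a whole object through a pointer pair,
   * e.g. a stats snapshot; left untouched by this series */
  struct sw_stats { uint64_t pkts; uint64_t bytes; };
  static struct sw_stats cur, snap;

  static void
  publish(void)
  {
          __atomic_store_n(&pkt_count, 0, __ATOMIC_RELAXED); /* handled */
          __atomic_store(&snap, &cur, __ATOMIC_RELAXED);     /* deferred */
  }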
Tyler Retzlaff (45):
net/mlx5: use rte stdatomic API
net/ixgbe: use rte stdatomic API
net/iavf: use rte stdatomic API
net/ice: use rte stdatomic API
net/i40e: use rte stdatomic API
net/hns3: use rte stdatomic API
net/bnxt: use rte stdatomic API
net/cpfl: use rte stdatomic API
net/af_xdp: use rte stdatomic API
net/octeon_ep: use rte stdatomic API
net/octeontx: use rte stdatomic API
net/cxgbe: use rte stdatomic API
net/gve: use rte stdatomic API
net/memif: use rte stdatomic API
net/thunderx: use rte stdatomic API
net/virtio: use rte stdatomic API
net/hinic: use rte stdatomic API
net/idpf: use rte stdatomic API
net/qede: use rte stdatomic API
net/ring: use rte stdatomic API
vdpa/mlx5: use rte stdatomic API
raw/ifpga: use rte stdatomic API
event/opdl: use rte stdatomic API
event/octeontx: use rte stdatomic API
event/dsw: use rte stdatomic API
dma/skeleton: use rte stdatomic API
crypto/octeontx: use rte stdatomic API
common/mlx5: use rte stdatomic API
common/idpf: use rte stdatomic API
common/iavf: use rte stdatomic API
baseband/acc: use rte stdatomic API
net/txgbe: use rte stdatomic API
net/null: use rte stdatomic API
event/dlb2: use rte stdatomic API
dma/idxd: use rte stdatomic API
crypto/ccp: use rte stdatomic API
common/cpt: use rte stdatomic API
bus/vmbus: use rte stdatomic API
examples: use rte stdatomic API
app/dumpcap: use rte stdatomic API
app/test: use rte stdatomic API
app/test-eventdev: use rte stdatomic API
app/test-crypto-perf: use rte stdatomic API
app/test-compress-perf: use rte stdatomic API
app/test-bbdev: use rte stdatomic API
app/dumpcap/main.c | 12 +-
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++--------
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 +-
app/test-compress-perf/comp_perf_test_throughput.c | 10 +-
app/test-compress-perf/comp_perf_test_verify.c | 6 +-
app/test-crypto-perf/cperf_test_latency.c | 6 +-
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +-
app/test-crypto-perf/cperf_test_throughput.c | 10 +-
app/test-crypto-perf/cperf_test_verify.c | 10 +-
app/test-eventdev/test_order_atq.c | 4 +-
app/test-eventdev/test_order_common.c | 5 +-
app/test-eventdev/test_order_common.h | 8 +-
app/test-eventdev/test_order_queue.c | 4 +-
app/test-eventdev/test_perf_common.h | 6 +-
app/test/test_bpf.c | 46 ++++--
app/test/test_distributor.c | 114 ++++++-------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++--
app/test/test_hash_multiwriter.c | 16 +-
app/test/test_hash_readwrite.c | 74 ++++-----
app/test/test_hash_readwrite_lf_perf.c | 88 +++++-----
app/test/test_lcores.c | 25 +--
app/test/test_lpm_perf.c | 14 +-
app/test/test_mcslock.c | 12 +-
app/test/test_mempool_perf.c | 9 +-
app/test/test_pflock.c | 13 +-
app/test/test_pmd_perf.c | 10 +-
app/test/test_rcu_qsbr_perf.c | 114 ++++++-------
app/test/test_ring_perf.c | 11 +-
app/test/test_ring_stress_impl.h | 10 +-
app/test/test_rwlock.c | 9 +-
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +--
app/test/test_spinlock.c | 9 +-
app/test/test_stack_perf.c | 12 +-
app/test/test_threads.c | 33 ++--
app/test/test_ticketlock.c | 9 +-
app/test/test_timer.c | 31 ++--
drivers/baseband/acc/rte_acc100_pmd.c | 36 ++--
drivers/baseband/acc/rte_vrb_pmd.c | 46 ++++--
drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
drivers/bus/vmbus/vmbus_channel.c | 8 +-
drivers/common/cpt/cpt_common.h | 2 +-
drivers/common/iavf/iavf_impl.c | 4 +-
drivers/common/idpf/idpf_common_device.h | 6 +-
drivers/common/idpf/idpf_common_rxtx.c | 14 +-
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 +-
drivers/common/mlx5/linux/mlx5_nl.c | 5 +-
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 +-
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 ++--
drivers/common/mlx5/mlx5_common_utils.h | 6 +-
drivers/common/mlx5/mlx5_malloc.c | 58 +++----
drivers/crypto/ccp/ccp_dev.c | 8 +-
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 +-
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +-
drivers/dma/skeleton/skeleton_dmadev.c | 5 +-
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
drivers/event/dlb2/dlb2.c | 34 ++--
drivers/event/dlb2/dlb2_priv.h | 13 +-
drivers/event/dlb2/dlb2_xstats.c | 2 +-
drivers/event/dsw/dsw_evdev.h | 6 +-
drivers/event/dsw/dsw_event.c | 47 ++++--
drivers/event/dsw/dsw_xstats.c | 4 +-
drivers/event/octeontx/timvf_evdev.h | 8 +-
drivers/event/octeontx/timvf_worker.h | 36 ++--
drivers/event/opdl/opdl_ring.c | 80 ++++-----
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 ++-
drivers/net/bnxt/bnxt_cpr.h | 4 +-
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 +-
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 +-
drivers/net/cpfl/cpfl_ethdev.c | 8 +-
drivers/net/cxgbe/clip_tbl.c | 12 +-
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 +--
drivers/net/cxgbe/cxgbe_ofld.h | 6 +-
drivers/net/cxgbe/l2t.c | 12 +-
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +--
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 +-
drivers/net/cxgbe/smt.h | 2 +-
drivers/net/gve/base/gve_osdep.h | 4 +-
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
drivers/net/hns3/hns3_cmd.c | 18 +-
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 ++--
drivers/net/hns3/hns3_ethdev.h | 32 ++--
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++----
drivers/net/hns3/hns3_intr.c | 36 ++--
drivers/net/hns3/hns3_intr.h | 4 +-
drivers/net/hns3/hns3_mbx.c | 6 +-
drivers/net/hns3/hns3_mp.c | 6 +-
drivers/net/hns3/hns3_rxtx.c | 10 +-
drivers/net/hns3/hns3_tm.c | 4 +-
drivers/net/i40e/i40e_ethdev.c | 4 +-
drivers/net/i40e/i40e_rxtx.c | 6 +-
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf.h | 16 +-
drivers/net/iavf/iavf_rxtx.c | 4 +-
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +-
drivers/net/ice/base/ice_osdep.h | 4 +-
drivers/net/ice/ice_dcf.c | 6 +-
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 +-
drivers/net/ice/ice_dcf_parent.c | 16 +-
drivers/net/ice/ice_ethdev.c | 12 +-
drivers/net/ice/ice_ethdev.h | 2 +-
drivers/net/idpf/idpf_ethdev.c | 7 +-
drivers/net/ixgbe/ixgbe_ethdev.c | 14 +-
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 +-
drivers/net/memif/memif.h | 4 +-
drivers/net/memif/rte_eth_memif.c | 50 +++---
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 +-
drivers/net/mlx5/mlx5.c | 9 +-
drivers/net/mlx5/mlx5.h | 66 ++++----
drivers/net/mlx5/mlx5_flow.c | 37 +++--
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 +++--
drivers/net/mlx5/mlx5_flow_dv.c | 126 +++++++-------
drivers/net/mlx5/mlx5_flow_flex.c | 14 +-
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++----
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++--
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++--
drivers/net/mlx5/mlx5_hws_cnt.c | 71 ++++----
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +-
drivers/net/mlx5/mlx5_rx.h | 14 +-
drivers/net/mlx5/mlx5_rxq.c | 30 ++--
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 +-
drivers/net/mlx5/mlx5_txpp.c | 84 +++++-----
drivers/net/mlx5/mlx5_txq.c | 12 +-
drivers/net/mlx5/mlx5_utils.c | 10 +-
drivers/net/mlx5/mlx5_utils.h | 4 +-
drivers/net/null/rte_eth_null.c | 12 +-
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +-
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +-
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx2_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx_ep_common.h | 4 +-
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 +-
drivers/net/octeontx/octeontx_ethdev.c | 8 +-
drivers/net/qede/base/bcm_osal.c | 6 +-
drivers/net/ring/rte_eth_ring.c | 8 +-
drivers/net/thunderx/nicvf_rxtx.c | 9 +-
drivers/net/thunderx/nicvf_struct.h | 4 +-
drivers/net/txgbe/txgbe_ethdev.c | 12 +-
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
drivers/net/virtio/virtio_ring.h | 4 +-
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 +-
drivers/net/virtio/virtqueue.h | 32 ++--
drivers/raw/ifpga/ifpga_rawdev.c | 9 +-
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +--
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +-
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++---
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 +-
examples/bbdev_app/main.c | 13 +-
examples/l2fwd-event/l2fwd_common.h | 4 +-
examples/l2fwd-event/l2fwd_event.c | 24 +--
examples/l2fwd-jobstats/main.c | 11 +-
.../client_server_mp/mp_server/main.c | 6 +-
examples/server_node_efd/efd_server/main.c | 6 +-
examples/vhost/main.c | 32 ++--
examples/vhost/main.h | 4 +-
examples/vhost/virtio_net.c | 13 +-
examples/vhost_blk/vhost_blk.c | 8 +-
examples/vm_power_manager/channel_monitor.c | 9 +-
180 files changed, 1641 insertions(+), 1500 deletions(-)
--
1.8.3.1
* [PATCH v4 01/45] net/mlx5: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
@ 2024-04-19 23:05 ` Tyler Retzlaff
2024-04-20 8:03 ` Morten Brørup
2024-04-19 23:06 ` [PATCH v4 02/45] net/ixgbe: " Tyler Retzlaff
` (43 subsequent siblings)
44 siblings, 1 reply; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:05 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 ++-
drivers/net/mlx5/mlx5.c | 9 ++-
drivers/net/mlx5/mlx5.h | 66 ++++++++---------
drivers/net/mlx5/mlx5_flow.c | 37 +++++-----
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 ++++++-----
drivers/net/mlx5/mlx5_flow_dv.c | 126 ++++++++++++++++----------------
drivers/net/mlx5/mlx5_flow_flex.c | 14 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++++++++-------
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++++----
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++++----
drivers/net/mlx5/mlx5_hws_cnt.c | 71 +++++++++---------
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +--
drivers/net/mlx5/mlx5_rx.h | 14 ++--
drivers/net/mlx5/mlx5_rxq.c | 30 ++++----
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 ++---
drivers/net/mlx5/mlx5_txpp.c | 84 ++++++++++-----------
drivers/net/mlx5/mlx5_txq.c | 12 +--
drivers/net/mlx5/mlx5_utils.c | 10 +--
drivers/net/mlx5/mlx5_utils.h | 4 +-
22 files changed, 351 insertions(+), 345 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 40ea9d2..70bba6c 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -1918,9 +1918,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
return -ENOTSUP;
}
/* Check there is no concurrent mapping in other thread. */
- if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
- base, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
+ base,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
return 0;
}
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index b54f3cc..63da8f4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1117,7 +1117,7 @@
return 0;
}
/* Only need to check refcnt, 0 after "sh" is allocated. */
- if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+ if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
priv->lb_used = 1;
return 0;
@@ -1163,7 +1163,7 @@
claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
sh->self_lb.ibv_cq = NULL;
}
- __atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
return -rte_errno;
#else
RTE_SET_USED(dev);
@@ -1186,8 +1186,9 @@
if (!priv->lb_used)
return;
- MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
- if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
+ MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
+ if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
if (sh->self_lb.qp) {
claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
sh->self_lb.qp = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d1a6382..2ff94db 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -855,8 +855,8 @@
ct_pool = mng->pools[idx];
for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
ct = &ct_pool->actions[i];
- val = __atomic_fetch_sub(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
MLX5_ASSERT(val == 1);
if (val > 1)
cnt++;
@@ -1082,7 +1082,8 @@
DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
return -ENOTSUP;
}
- if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
+ if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
+ rte_memory_order_relaxed) + 1 > 1)
return 0;
priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
@@ -1173,7 +1174,7 @@
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
- if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
return;
mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
mlx5_free(fp->flex.devx_fp);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3646d20..9e4a5fe 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -378,7 +378,7 @@ struct mlx5_drop {
struct mlx5_lb_ctx {
struct ibv_qp *qp; /* QP object. */
void *ibv_cq; /* Completion queue. */
- uint16_t refcnt; /* Reference count for representors. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
/* HW steering queue job descriptor type. */
@@ -481,10 +481,10 @@ enum mlx5_counter_type {
/* Counter age parameter. */
struct mlx5_age_param {
- uint16_t state; /**< Age state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
uint16_t port_id; /**< Port id of the counter. */
uint32_t timeout:24; /**< Aging timeout in seconds. */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/**< Time in seconds since last hit (atomically accessed). */
void *context; /**< Flow counter age context. */
};
@@ -497,7 +497,7 @@ struct flow_counter_stats {
/* Shared counters information for counters. */
struct mlx5_flow_counter_shared {
union {
- uint32_t refcnt; /* Only for shared action management. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
uint32_t id; /* User counter ID for legacy sharing. */
};
};
@@ -588,7 +588,7 @@ struct mlx5_counter_stats_raw {
/* Counter global management structure. */
struct mlx5_flow_counter_mng {
- volatile uint16_t n_valid; /* Number of valid pools. */
+ volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
uint16_t last_pool_idx; /* Last used pool index */
int min_id; /* The minimum counter ID in the pools. */
int max_id; /* The maximum counter ID in the pools. */
@@ -654,7 +654,7 @@ struct mlx5_aso_sq {
struct mlx5_aso_age_action {
LIST_ENTRY(mlx5_aso_age_action) next;
void *dr_action;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
/* Following fields relevant only when action is active. */
uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
struct mlx5_age_param age_params;
@@ -688,7 +688,7 @@ struct mlx5_geneve_tlv_option_resource {
rte_be16_t option_class; /* geneve tlv opt class.*/
uint8_t option_type; /* geneve tlv opt type.*/
uint8_t length; /* geneve tlv opt length. */
- uint32_t refcnt; /* geneve tlv object reference counter */
+ RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
};
@@ -903,7 +903,7 @@ struct mlx5_flow_meter_policy {
uint16_t group;
/* The group. */
rte_spinlock_t sl;
- uint32_t ref_cnt;
+ RTE_ATOMIC(uint32_t) ref_cnt;
/* Use count. */
struct rte_flow_pattern_template *hws_item_templ;
/* Hardware steering item templates. */
@@ -1038,7 +1038,7 @@ struct mlx5_flow_meter_profile {
struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
/**< srtcm_rfc2697 struct. */
};
- uint32_t ref_cnt; /**< Use count. */
+ RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
uint32_t g_support:1; /**< If G color will be generated. */
uint32_t y_support:1; /**< If Y color will be generated. */
uint32_t initialized:1; /**< Initialized. */
@@ -1078,7 +1078,7 @@ struct mlx5_aso_mtr {
enum mlx5_aso_mtr_type type;
struct mlx5_flow_meter_info fm;
/**< Pointer to the next aso flow meter structure. */
- uint8_t state; /**< ASO flow meter state. */
+ RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
uint32_t offset;
enum rte_color init_color;
};
@@ -1124,7 +1124,7 @@ struct mlx5_flow_mtr_mng {
/* Default policy table. */
uint32_t def_policy_id;
/* Default policy id. */
- uint32_t def_policy_ref_cnt;
+ RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
/** def_policy meter use count. */
struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
/* Meter drop table. */
@@ -1197,8 +1197,8 @@ struct mlx5_txpp_wq {
/* Tx packet pacing internal timestamp. */
struct mlx5_txpp_ts {
- uint64_t ci_ts;
- uint64_t ts;
+ RTE_ATOMIC(uint64_t) ci_ts;
+ RTE_ATOMIC(uint64_t) ts;
};
/* Tx packet pacing structure. */
@@ -1221,12 +1221,12 @@ struct mlx5_dev_txpp {
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
/* Statistics counters. */
- uint64_t err_miss_int; /* Missed service interrupt. */
- uint64_t err_rearm_queue; /* Rearm Queue errors. */
- uint64_t err_clock_queue; /* Clock Queue errors. */
- uint64_t err_ts_past; /* Timestamp in the past. */
- uint64_t err_ts_future; /* Timestamp in the distant future. */
- uint64_t err_ts_order; /* Timestamp not in ascending order. */
+ RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
+ RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
+ RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
+ RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
+ RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
+ RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
};
/* Sample ID information of eCPRI flex parser structure. */
@@ -1287,16 +1287,16 @@ struct mlx5_aso_ct_action {
void *dr_action_orig;
/* General action object for reply dir. */
void *dr_action_rply;
- uint32_t refcnt; /* Action used count in device flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */
uint16_t peer; /* The only peer port index could also use this CT. */
- enum mlx5_aso_ct_state state; /* ASO CT state. */
+ RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
bool is_original; /* The direction of the DR action to be used. */
};
/* CT action object state update. */
#define MLX5_ASO_CT_UPDATE_STATE(c, s) \
- __atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+ rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -1370,7 +1370,7 @@ struct mlx5_flex_pattern_field {
/* Port flex item context. */
struct mlx5_flex_item {
struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
- uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
uint32_t mapnum; /* Number of pattern translation entries. */
struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
@@ -1383,7 +1383,7 @@ struct mlx5_flex_item {
#define MLX5_SRV6_SAMPLE_NUM 5
/* Mlx5 internal flex parser profile structure. */
struct mlx5_internal_flex_parser_profile {
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
struct mlx5_flex_item flex; /* Hold map info for modify field. */
};
@@ -1512,9 +1512,9 @@ struct mlx5_dev_ctx_shared {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
#endif
- struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
- struct mlx5_hlist *modify_cmds;
- struct mlx5_hlist *tag_table;
+ RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
+ RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
+ RTE_ATOMIC(struct mlx5_hlist *) tag_table;
struct mlx5_list *port_id_action_list; /* Port ID action list. */
struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
struct mlx5_list *sample_action_list; /* List of sample actions. */
@@ -1525,7 +1525,7 @@ struct mlx5_dev_ctx_shared {
/* SW steering counters management structure. */
void *default_miss_action; /* Default miss action. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
- struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
+ RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
/* Shared interrupt handler section. */
struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
@@ -1570,7 +1570,7 @@ struct mlx5_dev_ctx_shared {
* Caution, secondary process may rebuild the struct during port start.
*/
struct mlx5_proc_priv {
- void *hca_bar;
+ RTE_ATOMIC(void *) hca_bar;
/* Mapped HCA PCI BAR area. */
size_t uar_table_sz;
/* Size of UAR register table. */
@@ -1635,7 +1635,7 @@ struct mlx5_rxq_obj {
/* Indirection table. */
struct mlx5_ind_table_obj {
LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
union {
void *ind_table; /**< Indirection table. */
struct mlx5_devx_obj *rqt; /* DevX RQT object. */
@@ -1826,7 +1826,7 @@ enum mlx5_quota_state {
};
struct mlx5_quota {
- uint8_t state; /* object state */
+ RTE_ATOMIC(uint8_t) state; /* object state */
uint8_t mode; /* metering mode */
/**
* Keep track of application update types.
@@ -1955,7 +1955,7 @@ struct mlx5_priv {
uint32_t flex_item_map; /* Map of allocated flex item elements. */
uint32_t nb_queue; /* HW steering queue number. */
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
- uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
+ RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -2007,7 +2007,7 @@ struct mlx5_priv {
#endif
struct rte_eth_dev *shared_host; /* Host device for HW steering. */
- uint16_t shared_refcnt; /* HW steering host reference counter. */
+ RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f31fdfb..1954975 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4623,8 +4623,8 @@ struct mlx5_translated_action_handle {
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
- __atomic_fetch_add(&shared_rss->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
+ rte_memory_order_relaxed);
return idx;
default:
break;
@@ -7459,7 +7459,7 @@ struct mlx5_list_entry *
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
- __atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
mlx5_free(default_miss_ctx.queue);
}
mlx5_flow_pop_thread_workspace();
@@ -7470,10 +7470,10 @@ struct mlx5_list_entry *
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
if (rss_desc->shared_rss)
- __atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
+ rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
+ rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
@@ -7976,7 +7976,8 @@ struct rte_flow *
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
+ rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
flow_mreg_del_copy_action(dev, flow);
@@ -9456,7 +9457,7 @@ struct mlx5_flow_workspace*
{
uint32_t pools_n, us;
- pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
+ pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -9558,17 +9559,17 @@ struct mlx5_flow_workspace*
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
age_param = MLX5_CNT_TO_AGE(cnt);
- if (__atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) != AGE_CANDIDATE)
+ if (rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) != AGE_CANDIDATE)
continue;
if (cur->data[i].hits != prev->data[i].hits) {
- __atomic_store_n(&age_param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
continue;
}
- if (__atomic_fetch_add(&age_param->sec_since_last_hit,
+ if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
time_delta,
- __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
+ rte_memory_order_relaxed) + time_delta <= age_param->timeout)
continue;
/**
* Hold the lock first, or if between the
@@ -9579,10 +9580,10 @@ struct mlx5_flow_workspace*
priv = rte_eth_devices[age_param->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_TMOUT, false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_TMOUT,
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
}
@@ -11407,7 +11408,7 @@ struct tunnel_db_element_release_ctx {
{
struct tunnel_db_element_release_ctx *ctx = x;
ctx->ret = 0;
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index cc1e8cf..9256aec 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1049,7 +1049,7 @@ struct mlx5_flow_tunnel {
LIST_ENTRY(mlx5_flow_tunnel) chain;
struct rte_flow_tunnel app_tunnel; /** app tunnel copy */
uint32_t tunnel_id; /** unique tunnel ID */
- uint32_t refctn;
+ RTE_ATOMIC(uint32_t) refctn;
struct rte_flow_action action;
struct rte_flow_item item;
struct mlx5_hlist *groups; /** tunnel groups */
@@ -1470,7 +1470,7 @@ struct rte_flow_pattern_template {
struct mlx5dr_match_template *mt; /* mlx5 match template. */
uint64_t item_flags; /* Item layer flags. */
uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
/*
* If true, then rule pattern should be prepended with
* represented_port pattern item.
@@ -1502,7 +1502,7 @@ struct rte_flow_actions_template {
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint16_t recom_off; /* Offset of DR IPv6 routing push remove action. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
uint8_t flex_item; /* flex item index. */
};
@@ -1855,7 +1855,7 @@ struct rte_flow_template_table {
/* Shared RSS action structure */
struct mlx5_shared_action_rss {
ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
- uint32_t refcnt; /**< Atomically accessed refcnt. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
struct mlx5_ind_table_obj *ind_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index ab9eb21..a94b228 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -619,7 +619,7 @@
uint8_t *u8addr;
uint8_t hit;
- if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
AGE_CANDIDATE)
continue;
byte = 63 - (j / 8);
@@ -627,13 +627,13 @@
u8addr = (uint8_t *)addr;
hit = (u8addr[byte] >> offset) & 0x1;
if (hit) {
- __atomic_store_n(&ap->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
} else {
struct mlx5_priv *priv;
- __atomic_fetch_add(&ap->sec_since_last_hit,
- diff, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
+ diff, rte_memory_order_relaxed);
/* If timeout passed add to aged-out list. */
if (ap->sec_since_last_hit <= ap->timeout)
continue;
@@ -641,12 +641,11 @@
rte_eth_devices[ap->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&ap->state,
+ if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
&expected,
AGE_TMOUT,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
LIST_INSERT_HEAD(&age_info->aged_aso,
act, next);
MLX5_AGE_SET(age_info,
@@ -946,10 +945,10 @@
for (i = 0; i < n; ++i) {
aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
MLX5_ASSERT(aso_mtr);
- verdict = __atomic_compare_exchange_n(&aso_mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
}
sq->tail += n;
@@ -1005,10 +1004,10 @@
mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,
MLX5_INDIRECT_ACTION_IDX_GET(job->action));
MLX5_ASSERT(mtr);
- verdict = __atomic_compare_exchange_n(&mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
}
@@ -1103,7 +1102,7 @@
struct mlx5_aso_sq *sq;
struct mlx5_dev_ctx_shared *sh = priv->sh;
uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
- uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+ uint8_t state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
poll_cq_t poll_mtr_cq =
is_tmpl_api ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
@@ -1112,7 +1111,7 @@
sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);
do {
poll_mtr_cq(priv, sq);
- if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
ASO_METER_READY)
return 0;
/* Waiting for CQE ready. */
@@ -1411,7 +1410,7 @@
uint16_t wqe_idx;
struct mlx5_aso_ct_pool *pool;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_FREE) {
DRV_LOG(ERR, "Fail: No context to query");
@@ -1620,12 +1619,12 @@
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
else
sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
/* Waiting for CQE ready, consider should block or sleep. */
@@ -1791,7 +1790,7 @@
bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (sh->config.dv_flow_en == 2)
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
@@ -1807,7 +1806,7 @@
}
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_READY ||
state == ASO_CONNTRACK_QUERY)
return 0;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d434c67..f9c56af 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -313,7 +313,7 @@ enum mlx5_l3_tunnel_detection {
}
static inline struct mlx5_hlist *
-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
const char *name, uint32_t size, bool direct_key,
bool lcores_share, void *ctx,
mlx5_list_create_cb cb_create,
@@ -327,7 +327,7 @@ enum mlx5_l3_tunnel_detection {
struct mlx5_hlist *expected = NULL;
char s[MLX5_NAME_SIZE];
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
if (likely(hl))
return hl;
snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
@@ -341,11 +341,11 @@ enum mlx5_l3_tunnel_detection {
"cannot allocate resource memory");
return NULL;
}
- if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_hlist_destroy(hl);
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
}
return hl;
}
@@ -6139,8 +6139,8 @@ struct mlx5_list_entry *
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
- struct mlx5_indexed_pool *ipool = __atomic_load_n
- (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+ struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
+ (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
if (!ipool) {
struct mlx5_indexed_pool *expected = NULL;
@@ -6165,13 +6165,13 @@ struct mlx5_list_entry *
ipool = mlx5_ipool_create(&cfg);
if (!ipool)
return NULL;
- if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
- &expected, ipool, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
+ &expected, ipool,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_ipool_destroy(ipool);
- ipool = __atomic_load_n(&sh->mdh_ipools[index],
- __ATOMIC_SEQ_CST);
+ ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
+ rte_memory_order_seq_cst);
}
}
return ipool;
@@ -6992,9 +6992,9 @@ struct mlx5_list_entry *
age_info = GET_PORT_AGE_INFO(priv);
age_param = flow_dv_counter_idx_get_age(dev, counter);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since counter may still in process.
@@ -7002,7 +7002,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -7038,8 +7038,8 @@ struct mlx5_list_entry *
* indirect action API, shared info is 1 before the reduction,
* so this condition is failed and function doesn't return here.
*/
- if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
- __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
+ rte_memory_order_relaxed) - 1)
return;
}
cnt->pool = pool;
@@ -10203,8 +10203,8 @@ struct mlx5_list_entry *
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
} else {
ret = rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -10243,8 +10243,8 @@ struct mlx5_list_entry *
geneve_opt_resource->option_class = geneve_opt_v->option_class;
geneve_opt_resource->option_type = geneve_opt_v->option_type;
geneve_opt_resource->length = geneve_opt_v->option_len;
- __atomic_store_n(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
}
exit:
rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
@@ -12192,8 +12192,8 @@ struct mlx5_list_entry *
(void *)(uintptr_t)(dev_flow->flow_idx);
age_param->timeout = age->timeout;
age_param->port_id = dev->data->port_id;
- __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
return counter;
}
@@ -13241,9 +13241,9 @@ struct mlx5_list_entry *
uint16_t expected = AGE_CANDIDATE;
age_info = GET_PORT_AGE_INFO(priv);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since age action may still in process.
@@ -13251,7 +13251,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
LIST_REMOVE(age, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -13275,7 +13275,7 @@ struct mlx5_list_entry *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
- uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
flow_dv_aso_age_remove_from_age(dev, age);
@@ -13451,7 +13451,7 @@ struct mlx5_list_entry *
return 0; /* 0 is an error. */
}
}
- __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
return pool->index | ((age_free->offset + 1) << 16);
}
@@ -13481,10 +13481,10 @@ struct mlx5_list_entry *
aso_age->age_params.context = context;
aso_age->age_params.timeout = timeout;
aso_age->age_params.port_id = dev->data->port_id;
- __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
- __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
static void
@@ -13666,12 +13666,12 @@ struct mlx5_list_entry *
uint32_t ret;
struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
/* Cannot release when CT is in the ASO SQ. */
if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
return -1;
- ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -13861,7 +13861,7 @@ struct mlx5_list_entry *
pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
/* 0: inactive, 1: created, 2+: used by flows. */
- __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -14813,8 +14813,8 @@ struct mlx5_list_entry *
age_act = flow_aso_age_get_by_idx(dev, owner_idx);
if (flow->age == 0) {
flow->age = owner_idx;
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
+ rte_memory_order_relaxed);
}
age_act_pos = actions_n++;
action_flags |= MLX5_FLOW_ACTION_AGE;
@@ -14851,9 +14851,9 @@ struct mlx5_list_entry *
} else {
if (flow->counter == 0) {
flow->counter = owner_idx;
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&cnt_act->shared_info.refcnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
}
/* Save information first, will apply later. */
action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -15185,8 +15185,8 @@ struct mlx5_list_entry *
flow->indirect_type =
MLX5_INDIRECT_ACTION_TYPE_CT;
flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
}
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
@@ -15855,7 +15855,7 @@ struct mlx5_list_entry *
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
- __atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
}
void
@@ -16038,8 +16038,8 @@ struct mlx5_list_entry *
sh->geneve_tlv_option_resource;
rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
if (geneve_opt_resource) {
- if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED) - 1)) {
+ if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
claim_zero(mlx5_devx_cmd_destroy
(geneve_opt_resource->obj));
mlx5_free(sh->geneve_tlv_option_resource);
@@ -16448,7 +16448,7 @@ struct mlx5_list_entry *
/* Update queue with indirect table queue memoyr. */
origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
- __atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
@@ -16494,9 +16494,9 @@ struct mlx5_list_entry *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"invalid shared action");
- if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
- 0, 0, __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
+ 0, rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -16632,10 +16632,10 @@ struct rte_flow_action_handle *
return __flow_dv_action_rss_release(dev, idx, error);
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
- if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
- &no_flow_refcnt, 1, false,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
+ &no_flow_refcnt, 1,
+ rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -17595,13 +17595,13 @@ struct rte_flow_action_handle *
case MLX5_INDIRECT_ACTION_TYPE_AGE:
age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
resp = data;
- resp->aged = __atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) == AGE_TMOUT ?
+ resp->aged = rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) == AGE_TMOUT ?
1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_dv_query_count(dev, idx, data, error);
@@ -17678,12 +17678,12 @@ struct rte_flow_action_handle *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
}
- resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
AGE_TMOUT ? 1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 4ae03a2..8a02247 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -86,7 +86,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map |= 1u << idx;
}
}
@@ -107,7 +107,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map &= ~(1u << idx);
rte_spinlock_unlock(&priv->flex_item_sl);
}
@@ -379,7 +379,7 @@
return ret;
}
if (acquire)
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return ret;
}
@@ -414,7 +414,7 @@
rte_errno = -EINVAL;
return -EINVAL;
}
- __atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
return 0;
}
@@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *
}
flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
/* Mark initialized flex item valid. */
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return (struct rte_flow_item_flex_handle *)flex;
error:
@@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
"invalid flex item handle value");
}
- if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
+ rte_memory_order_acquire, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&priv->flex_item_sl);
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9ebbe66..8891f3c 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -715,7 +715,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
}
if (acts->mark)
- if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed) - 1))
flow_hw_rxq_flag_set(dev, false);
if (acts->jump) {
@@ -2298,7 +2299,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
goto err;
acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
- __atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
@@ -4537,8 +4539,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
uint8_t i;
for (i = 0; i < nb_action_templates; i++) {
- uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED);
+ uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (refcnt <= 1) {
rte_flow_error_set(error, EINVAL,
@@ -4576,8 +4578,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
while (i--) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_sub_fetch(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
return rte_errno;
}
@@ -4748,8 +4750,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
- ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED) + 1;
+ ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (ret <= 1) {
rte_errno = EINVAL;
goto it_error;
@@ -4800,14 +4802,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
for (i = 0; i < nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_fetch_sub(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
i = nb_item_templates;
it_error:
while (i--)
- __atomic_fetch_sub(&item_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
error:
err = rte_errno;
if (tbl) {
@@ -5039,12 +5041,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
LIST_REMOVE(table, next);
for (i = 0; i < table->nb_item_templates; i++)
- __atomic_fetch_sub(&table->its[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
+ 1, rte_memory_order_relaxed);
for (i = 0; i < table->nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
- __atomic_fetch_sub(&table->ats[i].action_template->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
+ 1, rte_memory_order_relaxed);
}
flow_hw_destroy_table_multi_pattern_ctx(table);
if (table->matcher_info[0].matcher)
@@ -7287,7 +7289,7 @@ enum mlx5_hw_indirect_list_relative_position {
if (!at->tmpl)
goto error;
at->action_flags = action_flags;
- __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
return at;
error:
@@ -7323,7 +7325,7 @@ enum mlx5_hw_indirect_list_relative_position {
uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Action template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -7897,7 +7899,7 @@ enum mlx5_hw_indirect_list_relative_position {
break;
}
}
- __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
rte_errno = pattern_template_validate(dev, &it, 1);
if (rte_errno)
goto error;
@@ -7933,7 +7935,7 @@ enum mlx5_hw_indirect_list_relative_position {
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Item template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -10513,7 +10515,8 @@ struct mlx5_list_entry *
}
dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
priv->shared_host = host_dev;
- __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
}
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
@@ -10698,7 +10701,8 @@ struct mlx5_list_entry *
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
if (priv->hw_q) {
@@ -10814,7 +10818,8 @@ struct mlx5_list_entry *
priv->hw_q = NULL;
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
mlx5_free(priv->hw_attr);
@@ -10872,8 +10877,8 @@ struct mlx5_list_entry *
NULL,
"Invalid CT destruction index");
}
- __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
+ rte_memory_order_relaxed);
mlx5_ipool_free(pool->cts, idx);
return 0;
}
@@ -11572,7 +11577,7 @@ struct mlx5_hw_q_job *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_REPORTED:
case HWS_AGE_AGED_OUT_NOT_REPORTED:
resp->aged = 1;
@@ -11592,8 +11597,8 @@ struct mlx5_hw_q_job *
}
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index ca361f7..da3289b 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -2055,9 +2055,9 @@ struct mlx5_flow_meter_policy *
NULL, "Meter profile id not valid.");
/* Meter policy must exist. */
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
if (!priv->sh->config.dv_esw_en)
domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -2137,7 +2137,7 @@ struct mlx5_flow_meter_policy *
fm->is_enable = params->meter_enable;
fm->shared = !!shared;
fm->color_aware = !!params->use_prev_mtr_color;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
fm->def_policy = 1;
fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
@@ -2166,7 +2166,7 @@ struct mlx5_flow_meter_policy *
}
fm->active_state = params->meter_enable;
if (mtr_policy)
- __atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
error:
mlx5_flow_destroy_mtr_tbls(dev, fm);
@@ -2271,8 +2271,8 @@ struct mlx5_flow_meter_policy *
NULL, "Failed to create devx meter.");
}
fm->active_state = params->meter_enable;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
- __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
}
#endif
@@ -2295,7 +2295,7 @@ struct mlx5_flow_meter_policy *
if (fmp == NULL)
return -1;
/* Update dependencies. */
- __atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
fm->profile = NULL;
/* Remove from list. */
if (!priv->sh->meter_aso_en) {
@@ -2313,15 +2313,15 @@ struct mlx5_flow_meter_policy *
}
mlx5_flow_destroy_mtr_tbls(dev, fm);
if (fm->def_policy)
- __atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
+ 1, rte_memory_order_relaxed);
if (priv->sh->meter_aso_en) {
if (!fm->def_policy) {
mtr_policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
if (mtr_policy)
- __atomic_fetch_sub(&mtr_policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
fm->policy_id = 0;
}
fm->def_policy = 0;
@@ -2424,13 +2424,13 @@ struct mlx5_flow_meter_policy *
RTE_MTR_ERROR_TYPE_UNSPECIFIED,
NULL, "Meter object is being used.");
/* Destroy the meter profile. */
- __atomic_fetch_sub(&fm->profile->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
+ 1, rte_memory_order_relaxed);
/* Destroy the meter policy. */
policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
- __atomic_fetch_sub(&policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
index 14a2a8b..6ad0e8a 100644
--- a/drivers/net/mlx5/mlx5_flow_quota.c
+++ b/drivers/net/mlx5/mlx5_flow_quota.c
@@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
struct mlx5_quota *quota_obj =
sq->elts[(sq->tail + i) & mask].quota_obj;
- __atomic_compare_exchange_n(&quota_obj->state, &state,
- MLX5_QUOTA_STATE_READY, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
+ MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
}
}
@@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
rte_spinlock_lock(&sq->sqsl);
mlx5_quota_cmd_completion_handle(sq);
rte_spinlock_unlock(&sq->sqsl);
- if (__atomic_load_n(&quota_obj->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
MLX5_QUOTA_STATE_READY)
return 0;
} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
{
uint8_t state = MLX5_QUOTA_STATE_READY;
- bool verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ bool verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict)
return rte_flow_error_set(error, EBUSY,
@@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
async_job ? async_job : &sync_job, push, NULL);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
async_job ? async_job : &sync_job, push,
(void *)(uintptr_t)update->conf);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -593,9 +593,9 @@ struct rte_flow_action_handle *
NULL, "quota: failed to allocate quota object");
return NULL;
}
- verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: new quota object has invalid state");
@@ -616,8 +616,8 @@ struct rte_flow_action_handle *
(void *)(uintptr_t)conf);
if (ret) {
mlx5_ipool_free(qctx->quota_ipool, id);
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
+ rte_memory_order_relaxed);
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: WR failure");
return 0;
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index c31f2f3..1b625e0 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -149,7 +149,7 @@
}
if (param->timeout == 0)
continue;
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_NOT_REPORTED:
case HWS_AGE_AGED_OUT_REPORTED:
/* Already aged-out, no action is needed. */
@@ -171,8 +171,8 @@
hits = rte_be_to_cpu_64(stats[i].hits);
if (param->nb_cnts == 1) {
if (hits != param->accumulator_last_hits) {
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
param->accumulator_last_hits = hits;
continue;
}
@@ -184,8 +184,8 @@
param->accumulator_cnt = 0;
if (param->accumulator_last_hits !=
param->accumulator_hits) {
- __atomic_store_n(&param->sec_since_last_hit,
- 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit,
+ 0, rte_memory_order_relaxed);
param->accumulator_last_hits =
param->accumulator_hits;
param->accumulator_hits = 0;
@@ -193,9 +193,9 @@
}
param->accumulator_hits = 0;
}
- if (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,
- __ATOMIC_RELAXED) + time_delta <=
- __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
+ if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
+ rte_memory_order_relaxed) + time_delta <=
+ rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
continue;
/* Prepare the relevant ring for this AGE parameter */
if (priv->hws_strict_queue)
@@ -203,10 +203,10 @@
else
r = age_info->hw_age.aged_list;
/* Changing the state atomically and insert it into the ring. */
- if (__atomic_compare_exchange_n(&param->state, &expected1,
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
sizeof(uint32_t),
1, NULL);
@@ -221,11 +221,10 @@
*/
expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
if (ret == 0 &&
- !__atomic_compare_exchange_n(&param->state,
+ !rte_atomic_compare_exchange_strong_explicit(&param->state,
&expected2, expected1,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected2 == HWS_AGE_FREE)
mlx5_hws_age_param_free(priv,
param->own_cnt_index,
@@ -235,10 +234,10 @@
if (!priv->hws_strict_queue)
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
} else {
- __atomic_compare_exchange_n(&param->state, &expected2,
+ rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
}
}
/* The event is irrelevant in strict queue mode. */
@@ -796,8 +795,8 @@ struct mlx5_hws_cnt_pool *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
- switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
- __ATOMIC_RELAXED)) {
+ switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
+ rte_memory_order_relaxed)) {
case HWS_AGE_CANDIDATE:
case HWS_AGE_AGED_OUT_REPORTED:
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
@@ -862,8 +861,8 @@ struct mlx5_hws_cnt_pool *
"cannot allocate AGE parameter");
return 0;
}
- MLX5_ASSERT(__atomic_load_n(&param->state,
- __ATOMIC_RELAXED) == HWS_AGE_FREE);
+ MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
+ rte_memory_order_relaxed) == HWS_AGE_FREE);
if (shared) {
param->nb_cnts = 0;
param->accumulator_hits = 0;
@@ -914,9 +913,9 @@ struct mlx5_hws_cnt_pool *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
if (update_ade->timeout_valid) {
- uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
+ uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
update_ade->timeout,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (old_timeout == 0)
sec_since_last_hit_reset = true;
@@ -935,8 +934,8 @@ struct mlx5_hws_cnt_pool *
state_update = true;
}
if (sec_since_last_hit_reset)
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
if (state_update) {
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
@@ -945,13 +944,13 @@ struct mlx5_hws_cnt_pool *
* - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
* - AGED_OUT_REPORTED -> CANDIDATE
*/
- if (!__atomic_compare_exchange_n(&param->state, &expected,
+ if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
HWS_AGE_CANDIDATE_INSIDE_RING,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected == HWS_AGE_AGED_OUT_REPORTED)
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
return 0;
}
@@ -976,9 +975,9 @@ struct mlx5_hws_cnt_pool *
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
MLX5_ASSERT(param != NULL);
- if (__atomic_compare_exchange_n(&param->state, &expected,
- HWS_AGE_AGED_OUT_REPORTED, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
+ HWS_AGE_AGED_OUT_REPORTED,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
return param->context;
switch (expected) {
case HWS_AGE_FREE:
@@ -990,8 +989,8 @@ struct mlx5_hws_cnt_pool *
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
break;
case HWS_AGE_CANDIDATE_INSIDE_RING:
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
break;
case HWS_AGE_CANDIDATE:
/*
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index 1cb0564..db4e99e 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -101,7 +101,7 @@ struct __rte_cache_aligned mlx5_hws_cnt_pool {
LIST_ENTRY(mlx5_hws_cnt_pool) next;
alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_pool_cfg cfg;
alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_dcs_mng dcs_mng;
- alignas(RTE_CACHE_LINE_SIZE) uint32_t query_gen;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) query_gen;
struct mlx5_hws_cnt *pool;
struct mlx5_hws_cnt_raw_data_mng *raw_mng;
struct rte_ring *reuse_list;
@@ -134,10 +134,10 @@ enum {
/* HWS counter age parameter. */
struct __rte_cache_aligned mlx5_hws_age_param {
- uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/* Time in seconds since last hit (atomically accessed). */
- uint16_t state; /* AGE state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
uint64_t accumulator_last_hits;
/* Last total value of hits for comparing. */
uint64_t accumulator_hits;
@@ -426,7 +426,7 @@ struct __rte_cache_aligned mlx5_hws_age_param {
iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
hpool->pool[iidx].in_used = false;
hpool->pool[iidx].query_gen_when_free =
- __atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
qcache = hpool->cache->qcache[*queue];
if (unlikely(qcache == NULL)) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index fb4d8e6..d008e4d 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {
/* RX queue private data. */
struct mlx5_rxq_priv {
uint16_t idx; /* Queue index. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -188,7 +188,7 @@ struct mlx5_rxq_priv {
/* External RX queue descriptor. */
struct mlx5_external_rxq {
uint32_t hw_id; /* Queue index in the Hardware. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
/* mlx5_rxq.c */
@@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
void *addr;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
MLX5_ASSERT(rep != NULL);
/* Replace MPRQ buf. */
(*rxq->mprq_bufs)[rq_idx] = rep;
@@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
void *buf_addr;
/* Increment the refcnt of the whole chunk. */
- __atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
- MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
- __ATOMIC_RELAXED) <= strd_n + 1);
+ rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
+ MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
+ rte_memory_order_relaxed) <= strd_n + 1);
buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
/*
* MLX5 device doesn't use iova but it is necessary in a
@@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
return false;
rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
- return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+ return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
}
#define LWM_COOKIE_RXQID_OFFSET 0
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dd51687..f67aaa6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -416,7 +416,7 @@
rte_errno = EINVAL;
return -rte_errno;
}
- return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1319,7 +1319,7 @@
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
for (j = 0; j != strd_n; ++j) {
shinfo = &buf->shinfos[j];
shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2037,7 +2037,7 @@ struct mlx5_rxq_priv *
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
if (rxq != NULL)
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2059,7 +2059,7 @@ struct mlx5_rxq_priv *
if (rxq == NULL)
return 0;
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2138,7 +2138,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2158,7 +2158,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2447,8 +2447,8 @@ struct mlx5_ind_table_obj *
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
== 0)) {
- __atomic_fetch_add(&ind_tbl->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
+ rte_memory_order_relaxed);
break;
}
}
@@ -2479,7 +2479,7 @@ struct mlx5_ind_table_obj *
unsigned int ret;
rte_rwlock_write_lock(&priv->ind_tbls_lock);
- ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
@@ -2561,7 +2561,7 @@ struct mlx5_ind_table_obj *
}
return ret;
}
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
return 0;
}
@@ -2626,7 +2626,7 @@ struct mlx5_ind_table_obj *
{
uint32_t refcnt;
- refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+ refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
if (refcnt <= 1)
return 0;
/*
@@ -3258,8 +3258,8 @@ struct mlx5_hrxq *
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
if (ext_rxq == NULL)
return -rte_errno;
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
if (ext_rxq->hw_id != hw_idx) {
DRV_LOG(ERR, "Port %u external RxQ index %u "
"is already mapped to HW index (requesting is "
@@ -3296,8 +3296,8 @@ struct mlx5_hrxq *
rte_errno = EINVAL;
return -rte_errno;
}
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
port_id, dpdk_idx);
rte_errno = EINVAL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f8d6728..c241a1d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1441,7 +1441,7 @@
rte_delay_us_sleep(1000 * priv->rxqs_n);
DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
if (priv->sh->config.dv_flow_en == 2) {
- if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
flow_hw_rxq_flag_set(dev, false);
} else {
mlx5_flow_stop_default(dev);
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 107d7ab..0d77ff8 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -179,7 +179,7 @@ struct __rte_cache_aligned mlx5_txq_data {
__extension__
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
bool is_hairpin; /* Whether TxQ type is Hairpin. */
unsigned int max_inline_data; /* Max inline data. */
@@ -339,8 +339,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
* the service thread, data should be re-read.
*/
rte_compiler_barrier();
- ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
- ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
+ ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
+ ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
break;
@@ -350,8 +350,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts -= ts;
if (unlikely(mts >= UINT64_MAX / 2)) {
/* We have negative integer, mts is in the past. */
- __atomic_fetch_add(&sh->txpp.err_ts_past,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
+ 1, rte_memory_order_relaxed);
return -1;
}
tick = sh->txpp.tick;
@@ -360,8 +360,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts = (mts + tick - 1) / tick;
if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
/* We have mts is too distant future. */
- __atomic_fetch_add(&sh->txpp.err_ts_future,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
+ 1, rte_memory_order_relaxed);
return -1;
}
mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
@@ -1743,8 +1743,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
/* Convert the timestamp into completion to wait. */
ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
if (txq->ts_last && ts < txq->ts_last)
- __atomic_fetch_add(&txq->sh->txpp.err_ts_order,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
+ 1, rte_memory_order_relaxed);
txq->ts_last = ts;
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
sh = txq->sh;
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 5a5df2d..4e26fa2 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -538,12 +538,12 @@
uint64_t *ps;
rte_compiler_barrier();
- tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
- op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
+ tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
+ op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
rte_compiler_barrier();
- if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
+ if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
continue;
- if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
+ if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
continue;
ps = (uint64_t *)ts;
ps[0] = tm;
@@ -561,8 +561,8 @@
ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
rte_compiler_barrier();
- __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
rte_wmb();
}
@@ -590,8 +590,8 @@
*/
DRV_LOG(DEBUG,
"Clock Queue error sync lost (%X).", opcode);
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
return;
@@ -633,10 +633,10 @@
if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
return;
MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
- sh->txpp.ts.ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
- sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+ sh->txpp.ts.ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+ sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
sh->txpp.ts_p = 0;
if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -677,8 +677,8 @@
/* Check whether we have missed interrupts. */
if (cq_ci - wq->cq_ci != 1) {
DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
- __atomic_fetch_add(&sh->txpp.err_miss_int,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
+ 1, rte_memory_order_relaxed);
/* Check sync lost on wqe index. */
if (cq_ci - wq->cq_ci >=
(((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -693,8 +693,8 @@
/* Fire new requests to Rearm Queue. */
if (error) {
DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_rearm_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
}
@@ -987,8 +987,8 @@
mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
if (to.cts.op_own >> 4) {
DRV_LOG(DEBUG, "Clock Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
return -EIO;
}
@@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
return 0;
}
@@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
do {
uint64_t ts, ci;
- ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
- ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+ ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
+ ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ts,
- __ATOMIC_RELAXED) != ts)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
+ rte_memory_order_relaxed) != ts)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
- __ATOMIC_RELAXED) != ci)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
+ rte_memory_order_relaxed) != ci)
continue;
tsa->ts = ts;
tsa->ci_ts = ci;
@@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
for (i = 0; i < n_txpp; ++i)
stats[n_used + i].id = n_used + i;
stats[n_used + 0].value =
- __atomic_load_n(&sh->txpp.err_miss_int,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_miss_int,
+ rte_memory_order_relaxed);
stats[n_used + 1].value =
- __atomic_load_n(&sh->txpp.err_rearm_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
+ rte_memory_order_relaxed);
stats[n_used + 2].value =
- __atomic_load_n(&sh->txpp.err_clock_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
+ rte_memory_order_relaxed);
stats[n_used + 3].value =
- __atomic_load_n(&sh->txpp.err_ts_past,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_past,
+ rte_memory_order_relaxed);
stats[n_used + 4].value =
- __atomic_load_n(&sh->txpp.err_ts_future,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_future,
+ rte_memory_order_relaxed);
stats[n_used + 5].value =
- __atomic_load_n(&sh->txpp.err_ts_order,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_order,
+ rte_memory_order_relaxed);
stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
stats[n_used + 8].value = sh->txpp.sync_lost;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 14f55e8..da4236f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *
rte_errno = ENOMEM;
goto error;
}
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
tmpl->is_hairpin = false;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
@@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->is_hairpin = true;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
- __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
}
return ctrl;
}
@@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *
if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
return 0;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
+ if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
return 1;
if (txq_ctrl->obj) {
priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *
txq_free_elts(txq_ctrl);
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
- if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
if (!txq_ctrl->is_hairpin)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq_ctrl, next);
@@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *
if (!(*priv->txqs)[idx])
return -1;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
}
/**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index e28db2e..fc03cc0 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -203,7 +203,7 @@ struct mlx5_indexed_pool *
struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
lc = pool->cache[cidx]->lc;
- gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
+ gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
if (gc && lc != gc) {
mlx5_ipool_lock(pool);
if (lc && !(--lc->ref_cnt))
@@ -266,8 +266,8 @@ struct mlx5_indexed_pool *
pool->cache[cidx]->len = fetch_size - 1;
return pool->cache[cidx]->idx[pool->cache[cidx]->len];
}
- trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
- __ATOMIC_ACQUIRE) : 0;
+ trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
+ rte_memory_order_acquire) : 0;
trunk_n = lc ? lc->n_trunk : 0;
cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
/* Check if index reach maximum. */
@@ -332,11 +332,11 @@ struct mlx5_indexed_pool *
lc = p;
lc->ref_cnt = 1;
pool->cache[cidx]->lc = lc;
- __atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
}
/* Add trunk to trunks array. */
lc->trunks[trunk_idx] = trunk;
- __atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
/* Enqueue half of the index to global. */
ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
fetch_size = trunk->free >> 1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index b51d977..d86a809 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {
struct mlx5_indexed_cache {
struct mlx5_indexed_trunk **trunks;
- volatile uint32_t n_trunk_valid; /* Trunks allocated. */
+ volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
uint32_t n_trunk; /* Trunk pointer array size. */
uint32_t ref_cnt;
uint32_t len;
@@ -266,7 +266,7 @@ struct mlx5_indexed_pool {
uint32_t free_list; /* Index to first free trunk. */
};
struct {
- struct mlx5_indexed_cache *gc;
+ RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
/* Global cache. */
struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
/* Local cache. */
--
1.8.3.1
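The mlx5 hunks above all follow the same mechanical conversion: the shared field gains an RTE_ATOMIC() type and each __atomic_* builtin becomes the matching rte_atomic_*_explicit() call, with rte_memory_order_* constants replacing __ATOMIC_* and the builtin's weak/strong flag folded into the _strong_ variant. A minimal sketch of that pattern for a CAS-guarded state field, assuming <rte_stdatomic.h> provides these macros; the names are illustrative, not the driver's own:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

enum age_state {
	AGE_CANDIDATE = 1,
	AGE_AGED_OUT_NOT_REPORTED = 2,
};

struct age_param {
	RTE_ATOMIC(uint16_t) state; /* shared between datapath and service thread */
};

/* Move CANDIDATE -> AGED_OUT_NOT_REPORTED exactly once; true when this caller won. */
static inline bool
age_param_mark_aged(struct age_param *p)
{
	uint16_t expected = AGE_CANDIDATE;

	/*
	 * Old form:
	 *   __atomic_compare_exchange_n(&p->state, &expected,
	 *           AGE_AGED_OUT_NOT_REPORTED, false,
	 *           __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	 * The builtin's "false" (non-weak) argument maps to the strong variant.
	 */
	return rte_atomic_compare_exchange_strong_explicit(&p->state, &expected,
			AGE_AGED_OUT_NOT_REPORTED,
			rte_memory_order_relaxed, rte_memory_order_relaxed);
}

On failure, expected is updated to the observed value, which is why the hunks above test it afterwards to decide between freeing the parameter and re-queueing it.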
* RE: [PATCH v4 01/45] net/mlx5: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 01/45] net/mlx5: use rte " Tyler Retzlaff
@ 2024-04-20 8:03 ` Morten Brørup
0 siblings, 0 replies; 300+ messages in thread
From: Morten Brørup @ 2024-04-20 8:03 UTC (permalink / raw)
To: Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Abdullah Sevincer, Ajit Khaparde,
Alok Prasad, Anatoly Burakov, Andrew Rybchenko, Anoob Joseph,
Bruce Richardson, Byron Marohn, Chenbo Xia, Chengwen Feng,
Ciara Loftus, Ciara Power, Dariusz Sosnowski, David Hunt,
Devendra Singh Rawat, Erik Gabriel Carrillo, Guoyang Zhou,
Harman Kalra, Harry van Haaren, Honnappa Nagarahalli,
Jakub Grajciar, Jerin Jacob, Jeroen de Borst, Jian Wang,
Jiawen Wu, Jie Hai, Jingjing Wu, Joshua Washington, Joyce Kong,
Junfeng Guo, Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li,
Maciej Czekaj, Matan Azrad, Maxime Coquelin, Nicolas Chautru,
Ori Kam, Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy,
Reshma Pattan, Rosen Xu, Ruifeng Wang, Rushil Gupta,
Sameh Gobriel, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang,
Yipeng Wang, Yisen Zhuang, Yuying Zhang, Yuying Zhang,
Ziyang Xuan
> From: Tyler Retzlaff [mailto:roretzla@linux.microsoft.com]
> Sent: Saturday, 20 April 2024 01.06
>
> Replace the use of gcc builtin __atomic_xxx intrinsics with
> corresponding rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
For the series,
Acked-by: Morten Brørup <mb@smartsharesystems.com>
* [PATCH v4 02/45] net/ixgbe: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
2024-04-19 23:05 ` [PATCH v4 01/45] net/mlx5: use rte " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 03/45] net/iavf: " Tyler Retzlaff
` (42 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 14 ++++++++------
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 ++--
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c61c52b..e63ae1a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1130,7 +1130,7 @@ struct rte_ixgbe_xstats_name_off {
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbe_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1638,7 +1638,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbevf_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
@@ -4203,7 +4203,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
/* NOTE: review for potential ordering optimization */
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -4240,7 +4240,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -4336,7 +4336,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
ixgbe_dev_wait_setup_link_complete(dev, 0);
/* NOTE: review for potential ordering optimization */
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the IXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -4348,7 +4349,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR,
"Create link thread failed!");
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 22fc3be..8ad841e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -511,7 +511,7 @@ struct ixgbe_adapter {
*/
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
- bool link_thread_running;
+ RTE_ATOMIC(bool) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 3d39eaa..0d42fd8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1831,7 +1831,7 @@ const alignas(RTE_CACHE_LINE_SIZE) uint32_t
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
@@ -2114,7 +2114,7 @@ const alignas(RTE_CACHE_LINE_SIZE) uint32_t
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
--
1.8.3.1
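__atomic_test_and_set() and __atomic_clear() have no direct counterparts in the rte_atomic_* set, so the ixgbe hunks rewrite them as an exchange and a plain store on an RTE_ATOMIC(bool); the exchange returns the previous value, which preserves the test-and-set semantics. A hedged sketch of that single-runner guard, with names chosen here for illustration and <rte_stdatomic.h> assumed as the providing header:

#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(bool) link_thread_running;

/* True when this caller is the one allowed to spawn the link thread. */
static inline bool
link_thread_try_claim(void)
{
	/* was: !__atomic_test_and_set(&link_thread_running, __ATOMIC_SEQ_CST) */
	return !rte_atomic_exchange_explicit(&link_thread_running, true,
			rte_memory_order_seq_cst);
}

static inline void
link_thread_release(void)
{
	/* was: __atomic_clear(&link_thread_running, __ATOMIC_SEQ_CST) */
	rte_atomic_store_explicit(&link_thread_running, false,
			rte_memory_order_seq_cst);
}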
* [PATCH v4 03/45] net/iavf: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
2024-04-19 23:05 ` [PATCH v4 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 02/45] net/ixgbe: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 04/45] net/ice: " Tyler Retzlaff
` (41 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/iavf/iavf.h | 16 ++++++++--------
drivers/net/iavf/iavf_rxtx.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +++++++-------
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 7ab41c9..ad526c6 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -238,8 +238,8 @@ struct iavf_info {
struct virtchnl_vlan_caps vlan_v2_caps;
uint64_t supported_rxdid;
uint8_t *proto_xtr; /* proto xtr type for all queues */
- volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
- uint32_t pend_cmd_count;
+ volatile RTE_ATOMIC(enum virtchnl_ops) pend_cmd; /* pending command not finished */
+ RTE_ATOMIC(uint32_t) pend_cmd_count;
int cmd_retval; /* return value of the cmd response from PF */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
@@ -456,13 +456,13 @@ struct iavf_cmd_info {
_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 1, rte_memory_order_relaxed);
return !ret;
}
@@ -472,13 +472,13 @@ struct iavf_cmd_info {
_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 2, rte_memory_order_relaxed);
return !ret;
}
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 59a0b9e..ecc3143 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2025,7 +2025,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
@@ -2152,7 +2152,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
index 83825aa..20b656e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_neon.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -273,7 +273,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1111d30..6d5969f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -41,7 +41,7 @@ struct iavf_event_element {
};
struct iavf_event_handler {
- uint32_t ndev;
+ RTE_ATOMIC(uint32_t) ndev;
rte_thread_t tid;
int fd[2];
pthread_mutex_t lock;
@@ -129,7 +129,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_add(&handler->ndev, 1, __ATOMIC_RELAXED) + 1 != 1)
+ if (rte_atomic_fetch_add_explicit(&handler->ndev, 1, rte_memory_order_relaxed) + 1 != 1)
return 0;
#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
@@ -137,7 +137,7 @@ struct iavf_event_handler {
int err = pipe(handler->fd);
#endif
if (err != 0) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -146,7 +146,7 @@ struct iavf_event_handler {
if (rte_thread_create_internal_control(&handler->tid, "iavf-event",
iavf_dev_event_handle, NULL)) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -158,7 +158,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed) - 1 != 0)
return;
int unused = pthread_cancel((pthread_t)handler->tid.opaque_id);
@@ -574,8 +574,8 @@ struct iavf_event_handler {
/* read message and it's expected one */
if (msg_opc == vf->pend_cmd) {
uint32_t cmd_count =
- __atomic_fetch_sub(&vf->pend_cmd_count,
- 1, __ATOMIC_RELAXED) - 1;
+ rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count,
+ 1, rte_memory_order_relaxed) - 1;
if (cmd_count == 0)
_notify_cmd(vf, msg_ret);
} else {
--
1.8.3.1
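The iavf conversion also covers the pointer-form __atomic_compare_exchange(): with rte_atomic_compare_exchange_strong_explicit() the desired value is passed by value while the expected value remains a pointer, and the counter side keeps the fetch-and-subtract idiom. A small sketch of that pending-command handshake, with the structure and function names invented here for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct vf_ctx {
	RTE_ATOMIC(int) pend_cmd;            /* 0 means no command pending */
	RTE_ATOMIC(uint32_t) pend_cmd_count; /* responses still expected */
};

static inline bool
vf_cmd_try_issue(struct vf_ctx *vf, int ops, uint32_t nb_responses)
{
	int none = 0;

	/* was: __atomic_compare_exchange(&vf->pend_cmd, &none, &ops, 0, ...) */
	if (!rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &none, ops,
			rte_memory_order_acquire, rte_memory_order_acquire))
		return false; /* another command is still in flight */
	rte_atomic_store_explicit(&vf->pend_cmd_count, nb_responses,
			rte_memory_order_relaxed);
	return true;
}

/* fetch_sub returns the previous value, so "- 1" yields the remaining count. */
static inline bool
vf_cmd_response_done(struct vf_ctx *vf)
{
	return rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count, 1,
			rte_memory_order_relaxed) - 1 == 0;
}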
* [PATCH v4 04/45] net/ice: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (2 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 03/45] net/iavf: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 05/45] net/i40e: " Tyler Retzlaff
` (40 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/ice/base/ice_osdep.h | 4 ++--
drivers/net/ice/ice_dcf.c | 6 +++---
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 ++++----
drivers/net/ice/ice_dcf_parent.c | 16 ++++++++--------
drivers/net/ice/ice_ethdev.c | 12 ++++++------
drivers/net/ice/ice_ethdev.h | 2 +-
7 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 0e14b93..c17f1bf 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -235,7 +235,7 @@ struct ice_lock {
ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
struct ice_dma_mem *mem, u64 size)
{
- static uint64_t ice_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) ice_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -243,7 +243,7 @@ struct ice_lock {
return NULL;
snprintf(z_name, sizeof(z_name), "ice_dma_%" PRIu64,
- __atomic_fetch_add(&ice_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&ice_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
0, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7f8f516..204d4ea 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -764,7 +764,7 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
rte_spinlock_init(&hw->vc_cmd_queue_lock);
TAILQ_INIT(&hw->vc_cmd_queue);
- __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->vsi_update_thread_num, 0, rte_memory_order_relaxed);
hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
if (hw->arq_buf == NULL) {
@@ -888,8 +888,8 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
ice_dcf_dev_interrupt_handler, hw);
/* Wait for all `ice-thread` threads to exit. */
- while (__atomic_load_n(&hw->vsi_update_thread_num,
- __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(&hw->vsi_update_thread_num,
+ rte_memory_order_acquire) != 0)
rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
ice_dcf_mode_disable(hw);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index aa2a723..7726681 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -105,7 +105,7 @@ struct ice_dcf_hw {
void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen);
- int vsi_update_thread_num;
+ RTE_ATOMIC(int) vsi_update_thread_num;
uint8_t *arq_buf;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index d58ec9d..8f3a385 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -1743,7 +1743,7 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
{
return ad->hw.dcf_enabled &&
- !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+ !rte_atomic_load_explicit(&ad->dcf_state_on, rte_memory_order_relaxed);
}
/* Add UDP tunneling port */
@@ -1944,12 +1944,12 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
return -1;
}
- __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true, rte_memory_order_relaxed);
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 6e845f4..a478b69 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -123,8 +123,8 @@ struct ice_dcf_reset_event_param {
container_of(hw, struct ice_dcf_adapter, real_hw);
struct ice_adapter *parent_adapter = &adapter->parent;
- __atomic_fetch_add(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_relaxed);
rte_thread_detach(rte_thread_self());
@@ -133,8 +133,8 @@ struct ice_dcf_reset_event_param {
rte_spinlock_lock(&vsi_update_lock);
if (!ice_dcf_handle_vsi_update_event(hw)) {
- __atomic_store_n(&parent_adapter->dcf_state_on, true,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true,
+ rte_memory_order_relaxed);
ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
hw->num_vfs, hw->vf_vsi_map);
}
@@ -156,8 +156,8 @@ struct ice_dcf_reset_event_param {
free(param);
- __atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_release);
return 0;
}
@@ -269,8 +269,8 @@ struct ice_dcf_reset_event_param {
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
start_vsi_reset_thread(dcf_hw, true,
pf_msg->event_data.vf_vsi_map.vf_id);
break;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 87385d2..0f35c6a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4062,9 +4062,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = &dev->data->dev_link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
@@ -4078,9 +4078,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 984479a..d73faae 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -621,7 +621,7 @@ struct ice_adapter {
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
/* True if DCF state of the associated PF is on */
- bool dcf_state_on;
+ RTE_ATOMIC(bool) dcf_state_on;
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
--
1.8.3.1
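Where the object cannot be redeclared atomic (struct rte_eth_link belongs to ethdev, not the driver), the ice hunks instead cast the destination to an atomic-qualified pointer at the call site. A sketch of that publish-by-CAS idiom under the assumption that the published structure is exactly 8 bytes; the type and helper below are illustrative stand-ins, not the ethdev definitions:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <rte_stdatomic.h>

struct link_status {   /* 8-byte stand-in for the published status word */
	uint32_t speed;
	uint16_t duplex;
	uint16_t up;
};

/*
 * Atomically replace *dst with *src; true when the swap took effect.
 * Requires sizeof(struct link_status) == sizeof(uint64_t).
 */
static inline bool
link_status_publish(struct link_status *dst, const struct link_status *src)
{
	uint64_t old, val;

	memcpy(&old, dst, sizeof(old));
	memcpy(&val, src, sizeof(val));
	/* was: __atomic_compare_exchange_n((uint64_t *)dst, ..., 0,
	 *      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) */
	return rte_atomic_compare_exchange_strong_explicit(
			(uint64_t __rte_atomic *)dst, &old, val,
			rte_memory_order_seq_cst, rte_memory_order_seq_cst);
}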
* [PATCH v4 05/45] net/i40e: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (3 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 04/45] net/ice: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 06/45] net/hns3: " Tyler Retzlaff
` (39 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/i40e/i40e_ethdev.c | 4 ++--
drivers/net/i40e/i40e_rxtx.c | 6 +++---
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 380ce1a..801cc95 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -4687,7 +4687,7 @@ enum i40e_status_code
u64 size,
u32 alignment)
{
- static uint64_t i40e_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) i40e_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -4695,7 +4695,7 @@ enum i40e_status_code
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
- __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&i40e_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 5d25ab4..155f243 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -486,7 +486,7 @@
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
@@ -745,7 +745,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
@@ -867,7 +867,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index d873e30..3a99137 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -425,7 +425,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
--
1.8.3.1
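The Rx-path changes in i40e (and in iavf above) keep the standalone fence form: rte_atomic_thread_fence() simply takes rte_memory_order_acquire in place of __ATOMIC_ACQUIRE. A reduced sketch of the descriptor-polling pattern the fence orders, with the descriptor layout and DD bit position made up for illustration and headers assumed:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

#define RX_DESC_DD (1ULL << 0) /* "descriptor done" bit, position is illustrative */

struct rx_desc {
	volatile uint64_t qword0;
	volatile uint64_t qword1; /* carries the DD bit, written last by the NIC */
};

/* Returns nonzero once the descriptor is complete and qword0 is safe to read. */
static inline int
rx_desc_ready(const struct rx_desc *rxdp, uint64_t *qword0)
{
	if (!(rxdp->qword1 & RX_DESC_DD))
		return 0;
	/*
	 * was: rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
	 * Order the DD-bit load before the loads of the other descriptor words.
	 */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	*qword0 = rxdp->qword0;
	return 1;
}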
* [PATCH v4 06/45] net/hns3: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (4 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 05/45] net/i40e: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 07/45] net/bnxt: " Tyler Retzlaff
` (38 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/hns3/hns3_cmd.c | 18 ++++++------
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 +++++++++++------------
drivers/net/hns3/hns3_ethdev.h | 32 ++++++++++-----------
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++++++++++++++++++--------------------
drivers/net/hns3/hns3_intr.c | 36 +++++++++++------------
drivers/net/hns3/hns3_intr.h | 4 +--
drivers/net/hns3/hns3_mbx.c | 6 ++--
drivers/net/hns3/hns3_mp.c | 6 ++--
drivers/net/hns3/hns3_rxtx.c | 10 +++----
drivers/net/hns3/hns3_tm.c | 4 +--
11 files changed, 107 insertions(+), 107 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 001ff49..3c5fdbe 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,12 @@
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
uint64_t size, uint32_t alignment)
{
- static uint64_t hns3_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
- __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
@@ -198,8 +198,8 @@
hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
csq->next_to_use, csq->next_to_clean);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- __atomic_store_n(&hw->reset.disable_cmd, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+ rte_memory_order_relaxed);
hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}
@@ -313,7 +313,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (hns3_cmd_csq_done(hw))
return 0;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw,
"Don't wait for reply because of disable_cmd");
return -EBUSY;
@@ -360,7 +360,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
int retval;
uint32_t ntc;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -747,7 +747,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
ret = -EBUSY;
goto err_cmd_init;
}
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
ret = hns3_cmd_query_firmware_version_and_capability(hw);
if (ret) {
@@ -790,7 +790,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
return 0;
err_cmd_init:
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return ret;
}
@@ -819,7 +819,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (!hns->is_vf)
(void)hns3_firmware_compat_config(hw, false);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
/*
* A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 915e4eb..2f917fe 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,7 @@
* and configured directly to the hardware in the RESET_STAGE_RESTORE
* stage of the reset process.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a..327f6fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ struct hns3_intr_state {
};
static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
@@ -134,7 +134,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
hw->reset.stats.imp_cnt++;
@@ -148,7 +148,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
hw->reset.stats.global_cnt++;
@@ -1151,7 +1151,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* ensure that the hardware configuration remains unchanged before and
* after reset.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
}
@@ -1175,7 +1175,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* we will restore configurations to hardware in hns3_restore_vlan_table
* and hns3_restore_vlan_conf later.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
if (ret) {
hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5059,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5150,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5158,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
return ret;
hw->mac.link_status = RTE_ETH_LINK_DOWN;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -5184,7 +5184,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_tm_dev_stop_proc(hw);
hns3_config_mac_tnl_int(hw, false);
hns3_stop_tqps(hw);
@@ -5577,7 +5577,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
last_req = hns3_get_reset_level(hns, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -5677,7 +5677,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
}
static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
struct hns3_hw *hw = &hns->hw;
enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5737,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* any mailbox handling or command to firmware is only valid
* after hns3_cmd_init is called.
*/
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hw->reset.stats.request_cnt++;
break;
case HNS3_IMP_RESET:
@@ -5792,7 +5792,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* from table space. Hence, for function reset software intervention is
* required to delete the entries
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -5913,10 +5913,10 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5925,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Check if there is any ongoing reset in the hardware. This status can
@@ -6576,7 +6576,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index a4bc62a..a6b6524 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -401,17 +401,17 @@ enum hns3_schedule {
struct hns3_reset_data {
enum hns3_reset_stage stage;
- uint16_t schedule;
+ RTE_ATOMIC(uint16_t) schedule;
/* Reset flag, covering the entire reset process */
- uint16_t resetting;
+ RTE_ATOMIC(uint16_t) resetting;
/* Used to disable sending cmds during reset */
- uint16_t disable_cmd;
+ RTE_ATOMIC(uint16_t) disable_cmd;
/* The reset level being processed */
enum hns3_reset_level level;
/* Reset level set, each bit represents a reset level */
- uint64_t pending;
+ RTE_ATOMIC(uint64_t) pending;
/* Request reset level set, from interrupt or mailbox */
- uint64_t request;
+ RTE_ATOMIC(uint64_t) request;
int attempts; /* Reset failure retry */
int retries; /* Timeout failure retry in reset_post */
/*
@@ -499,7 +499,7 @@ struct hns3_hw {
* by dev_set_link_up() or dev_start().
*/
bool set_link_down;
- unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+ RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
struct hns3_tqp_stats tqp_stats;
/* Include Mac stats | Rx stats | Tx stats */
struct hns3_mac_stats mac_stats;
@@ -844,7 +844,7 @@ struct hns3_vf {
struct hns3_adapter *adapter;
/* Whether PF support push link status change to VF */
- uint16_t pf_push_lsc_cap;
+ RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
/*
* If PF support push link status change, VF still need send request to
@@ -853,7 +853,7 @@ struct hns3_vf {
*/
uint16_t req_link_info_cnt;
- uint16_t poll_job_started; /* whether poll job is started */
+ RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
};
struct hns3_adapter {
@@ -997,32 +997,32 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
hns3_read_reg((a)->io_base, (reg))
static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t res;
- res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+ res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) & (1UL << nr)) != 0;
return res;
}
static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_or_explicit(addr, (1UL << nr), rte_memory_order_relaxed);
}
static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_and_explicit(addr, ~(1UL << nr), rte_memory_order_relaxed);
}
static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t mask = (1UL << nr);
- return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+ return rte_atomic_fetch_and_explicit(addr, ~mask, rte_memory_order_relaxed) & mask;
}
int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a..b83d5b9 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
};
static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
@@ -484,7 +484,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* MTU value issued by hns3 VF PMD must be less than or equal to
* PF's MTU.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "Failed to set mtu during resetting");
return -EIO;
}
@@ -565,7 +565,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
val = hns3_read_dev(hw, HNS3_VF_RST_ING);
hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
}
static void
@@ -650,8 +650,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
struct hns3_vf_to_pf_msg req;
- __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+ rte_memory_order_release);
hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* mailbox from PF driver to get this capability.
*/
hns3vf_handle_mbx_msg(hw);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
break;
remain_ms--;
@@ -677,10 +677,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* state: unknown (means pf not ack), not_supported, supported.
* Here config it as 'not_supported' when it's 'unknown' state.
*/
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
hns3_info(hw, "detect PF support push link status change!");
} else {
@@ -920,7 +920,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
bool send_req;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return;
send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +956,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* sending request to PF kernel driver, then could update link status by
* process PF kernel driver's link status mailbox message.
*/
- if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
return;
if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +994,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_hw *hw = &hns->hw;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw,
"vf set vlan id failed during resetting, vlan_id =%u",
vlan_id);
@@ -1059,7 +1059,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
unsigned int tmp_mask;
int ret = 0;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
mask);
return -EIO;
@@ -1252,7 +1252,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
- __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
hns3vf_service_handler(dev);
}
@@ -1264,7 +1264,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rte_eal_alarm_cancel(hns3vf_service_handler, dev);
- __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
}
static int
@@ -1500,10 +1500,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -1528,7 +1528,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1643,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1773,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -1847,7 +1847,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (ret)
return ret;
}
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return 0;
}
@@ -1888,7 +1888,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* from table space. Hence, for function reset software intervention is
* required to delete the entries.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -2030,7 +2030,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
}
static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
enum hns3_reset_level reset_level;
@@ -2070,10 +2070,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2082,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Hardware reset has been notified, we now have to poll & check if
@@ -2278,7 +2278,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30..26fa2eb 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ enum hns3_hw_err_report_type {
static int
hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
- int num, uint64_t *levels,
+ int num, RTE_ATOMIC(uint64_t) *levels,
enum hns3_hw_err_report_type err_type)
{
const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,7 @@ enum hns3_hw_err_report_type {
hw->reset.request = 0;
hw->reset.pending = 0;
hw->reset.resetting = 0;
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
if (!hw->reset.wait_data) {
@@ -2419,8 +2419,8 @@ enum hns3_hw_err_report_type {
/* Reschedule the reset process after successful initialization */
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+ rte_memory_order_relaxed);
return;
}
@@ -2428,15 +2428,15 @@ enum hns3_hw_err_report_type {
return;
/* Schedule restart alarm if it is not scheduled yet */
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_REQUESTED)
return;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED)
rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}
@@ -2453,11 +2453,11 @@ enum hns3_hw_err_report_type {
return;
}
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
SCHEDULE_NONE)
return;
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}
@@ -2537,7 +2537,7 @@ enum hns3_hw_err_report_type {
}
static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
uint64_t merge_cnt = hw->reset.stats.merge_cnt;
uint64_t tmp;
@@ -2633,7 +2633,7 @@ enum hns3_hw_err_report_type {
* Regardless of whether the execution is successful or not, the
* flow after execution must be continued.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
(void)hns3_cmd_init(hw);
reset_fail:
hw->reset.attempts = 0;
@@ -2661,7 +2661,7 @@ enum hns3_hw_err_report_type {
int ret;
if (hw->reset.stage == RESET_STAGE_NONE) {
- __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_DOWN;
hns3_report_reset_begin(hw);
ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2750,7 @@ enum hns3_hw_err_report_type {
hns3_notify_reset_ready(hw, false);
hns3_clear_reset_level(hw, &hw->reset.pending);
hns3_clear_reset_status(hw);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.attempts = 0;
hw->reset.stats.success_cnt++;
hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2812,7 @@ enum hns3_hw_err_report_type {
hw->reset.mbuf_deferred_free = false;
}
rte_spinlock_unlock(&hw->lock);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_NONE;
hns3_clock_gettime(&tv);
timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c07..1edb07d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
};
int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
void hns3_handle_error(struct hns3_adapter *hns);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc16..10c6e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,7 @@
mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
while (wait_time < mbx_time_limit) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw, "Don't wait for mbx response because of "
"disable_cmd");
return -EBUSY;
@@ -382,7 +382,7 @@
rte_spinlock_lock(&hw->cmq.crq.lock);
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
@@ -457,7 +457,7 @@
}
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f194..ba8f8ec 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,7 @@
int i;
if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
- __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+ rte_atomic_load_explicit(&hw->secondary_cnt, rte_memory_order_relaxed) == 0)
return;
if (!mp_req_type_is_valid(type)) {
@@ -277,7 +277,7 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev)
ret);
return ret;
}
- __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
} else {
ret = hns3_mp_init_primary();
if (ret) {
@@ -297,7 +297,7 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
process_data.eth_dev_cnt--;
if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 7e636a0..73a388b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4464,7 +4464,7 @@
struct hns3_adapter *hns = eth_dev->data->dev_private;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+ rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4530,7 +4530,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4586,7 +4586,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4615,7 +4615,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4648,7 +4648,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d969164..92a6685 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
/* don't goto fail_clear, user may try later */
@@ -1141,7 +1141,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
return -EBUSY;
--
1.8.3.1
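
For readers less used to the new API, the hns3 conversion above reduces to three mechanical changes: shared fields gain an RTE_ATOMIC() type marker, each __atomic_* builtin becomes the matching rte_atomic_*_explicit() call, and __ATOMIC_* tokens become rte_memory_order_*. A minimal sketch of the bit-helper and compare-exchange shapes seen in the hunks above, using made-up demo_* names rather than the real hns3 structures:

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* same shape as hns3_atomic_set_bit(): set one reset-level bit */
    static inline void
    demo_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
    {
        rte_atomic_fetch_or_explicit(addr, 1ULL << nr,
                                     rte_memory_order_relaxed);
    }

    /* same shape as hns3_test_and_clear_bit(): clear a bit and report
     * whether it was set; fetch_and returns the pre-update value */
    static inline uint64_t
    demo_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
    {
        uint64_t mask = 1ULL << nr;

        return rte_atomic_fetch_and_explicit(addr, ~mask,
                                             rte_memory_order_relaxed) & mask;
    }

    /* same shape as the pf_push_lsc_cap update: unlike the old
     * __atomic_compare_exchange(), the desired value is passed by
     * value, not through a pointer */
    static inline void
    demo_resolve_cap(RTE_ATOMIC(uint16_t) *cap, uint16_t unknown, uint16_t fallback)
    {
        uint16_t exp = unknown;

        (void)rte_atomic_compare_exchange_strong_explicit(cap, &exp, fallback,
                rte_memory_order_acquire, rte_memory_order_acquire);
    }

That by-value desired argument is also why the &val in the old hns3vf compare-exchange call loses its address-of operator in the diff.
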
* [PATCH v4 07/45] net/bnxt: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (5 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 06/45] net/hns3: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 08/45] net/cpfl: " Tyler Retzlaff
` (37 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/bnxt/bnxt_cpr.h | 4 ++--
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 ++++++++-----
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 ++--
5 files changed, 14 insertions(+), 11 deletions(-)
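
The one spot in this patch that is not a pure token swap is the completion-ring validity check documented in bnxt_cpr.h: the standalone acquire fence stays, only its memory-order argument changes spelling. A minimal sketch of that load-then-fence shape, with a made-up demo_cmpl descriptor rather than the real bnxt completion layout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_byteorder.h>

    /* hypothetical little-endian completion entry; bit 0 of info3_v
     * carries the valid flag, as in the hardware descriptor */
    struct demo_cmpl {
        uint32_t info3_v;
        uint32_t payload;
    };

    static inline bool
    demo_cmpl_valid(const struct demo_cmpl *c, bool expected_valid)
    {
        bool valid = (rte_le_to_cpu_32(c->info3_v) & 0x1) != 0;

        if (valid == expected_valid) {
            /* keep later loads of the descriptor body from being
             * hoisted above the valid-bit load */
            rte_atomic_thread_fence(rte_memory_order_acquire);
            return true;
        }
        return false;
    }

The NEON Rx path in bnxt_rxtx_vec_neon.c uses the same fence for the same reason, ordering the reload of the lower descriptor words after info3_v.
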
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index c7b3480..43f06fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -107,7 +107,7 @@ struct bnxt_cp_ring_info {
/**
* Check validity of a completion ring entry. If the entry is valid, include a
- * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * C11 rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
* completion are not hoisted by the compiler or by the CPU to come before the
* loading of the "valid" field.
*
@@ -130,7 +130,7 @@ struct bnxt_cp_ring_info {
expected = !(raw_cons & ring_size);
valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
if (valid == expected) {
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
return true;
}
return false;
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 77bc382..36e0ac3 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- uint64_t rx_mbuf_alloc_fail;
+ RTE_ATOMIC(uint64_t) rx_mbuf_alloc_fail;
uint8_t need_realloc;
const struct rte_memzone *mz;
};
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 957b7d6..69e8384 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -49,7 +49,8 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -86,7 +87,8 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -465,7 +467,8 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -1677,8 +1680,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return -ENOMEM;
}
}
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 775400f..04864e0 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -240,7 +240,7 @@
rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
/* Use acquire fence to order loads of descriptor words. */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Reload lower 64b of descriptors to make it ordered after info3_v. */
rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
((void *)&cpr->cp_desc_ring[cons + 7],
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6a6feab..479f819 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -663,7 +663,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -724,7 +724,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
--
1.8.3.1
* [PATCH v4 08/45] net/cpfl: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (6 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 07/45] net/bnxt: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 09/45] net/af_xdp: " Tyler Retzlaff
` (36 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/cpfl/cpfl_ethdev.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
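
The cpfl change is a plain statistics conversion: the datapath bumps a per-queue failure counter with relaxed ordering, and the stats path sums it across queues and clears it the same way. A small sketch of that pattern, assuming a hypothetical demo_rxq_stats array rather than the real idpf/cpfl queue layout:

    #include <stdint.h>
    #include <rte_stdatomic.h>

    struct demo_rxq_stats {
        RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
    };

    /* datapath: count a failure; no ordering of other memory is needed */
    static inline void
    demo_count_alloc_fail(struct demo_rxq_stats *st)
    {
        rte_atomic_fetch_add_explicit(&st->mbuf_alloc_failed, 1,
                                      rte_memory_order_relaxed);
    }

    /* control path: aggregate over all queues, then reset */
    static inline uint64_t
    demo_collect_and_reset(struct demo_rxq_stats *st, uint16_t nb_q)
    {
        uint64_t total = 0;
        uint16_t i;

        for (i = 0; i < nb_q; i++) {
            total += rte_atomic_load_explicit(&st[i].mbuf_alloc_failed,
                                              rte_memory_order_relaxed);
            rte_atomic_store_explicit(&st[i].mbuf_alloc_failed, 0,
                                      rte_memory_order_relaxed);
        }
        return total;
    }
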
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ef19aa1..5b47e22 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -300,8 +300,9 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed +=
+ rte_atomic_load_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -349,7 +350,8 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
* [PATCH v4 09/45] net/af_xdp: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (7 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 08/45] net/cpfl: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 10/45] net/octeon_ep: " Tyler Retzlaff
` (35 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
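
In the af_xdp case the memory orders do real work: the UMEM refcnt is published with a release store once the object is fully set up, and later acquire loads guarantee that a non-zero count is only seen together with the initialised contents. A sketch of that publish/share pairing with hypothetical demo_* names; the check-then-increment is assumed to be serialised by the caller, as queue setup normally is:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* hypothetical shared-buffer descriptor, shaped like the UMEM
     * refcnt handling above */
    struct demo_umem {
        RTE_ATOMIC(uint8_t) refcnt;
        uint32_t max_users;
        /* ... payload initialised before publication ... */
    };

    /* creator: finish initialisation, then publish; this release store
     * pairs with the acquire load in demo_try_share() */
    static inline void
    demo_publish(struct demo_umem *u)
    {
        rte_atomic_store_explicit(&u->refcnt, 1, rte_memory_order_release);
    }

    /* another queue: take an extra reference only if the object is
     * already published and still has room for one more user */
    static inline bool
    demo_try_share(struct demo_umem *u)
    {
        uint8_t cnt = rte_atomic_load_explicit(&u->refcnt,
                                               rte_memory_order_acquire);

        if (cnt == 0 || cnt >= u->max_users)
            return false;
        rte_atomic_fetch_add_explicit(&u->refcnt, 1,
                                      rte_memory_order_acquire);
        return true;
    }

Teardown is the mirror image: rte_atomic_fetch_sub_explicit() with acquire ordering, destroying the object only when the returned (pre-decrement) value was 1.
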
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 268a130..4833180 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -116,7 +116,7 @@ struct xsk_umem_info {
const struct rte_memzone *mz;
struct rte_mempool *mb_pool;
void *buffer;
- uint8_t refcnt;
+ RTE_ATOMIC(uint8_t) refcnt;
uint32_t max_xsks;
};
@@ -995,7 +995,8 @@ static int link_xdp_prog_with_dev(int ifindex, int fd, __u32 flags)
break;
xsk_socket__delete(rxq->xsk);
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1,
+ rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
/* free pkt_tx_queue */
@@ -1097,8 +1098,8 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
ret = -1;
goto out;
}
- if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
- __ATOMIC_ACQUIRE)) {
+ if (rte_atomic_load_explicit(&internals->rx_queues[i].umem->refcnt,
+ rte_memory_order_acquire)) {
*umem = internals->rx_queues[i].umem;
goto out;
}
@@ -1131,11 +1132,11 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
return NULL;
if (umem != NULL &&
- __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
+ rte_atomic_load_explicit(&umem->refcnt, rte_memory_order_acquire) <
umem->max_xsks) {
AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
internals->if_name, rxq->xsk_queue_idx);
- __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&umem->refcnt, 1, rte_memory_order_acquire);
}
}
@@ -1177,7 +1178,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
mb_pool->name, umem->max_xsks);
}
- __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&umem->refcnt, 1, rte_memory_order_release);
}
return umem;
@@ -1606,7 +1607,8 @@ struct msg_internal {
if (rxq->umem == NULL)
return -ENOMEM;
txq->umem = rxq->umem;
- reserve_before = __atomic_load_n(&rxq->umem->refcnt, __ATOMIC_ACQUIRE) <= 1;
+ reserve_before = rte_atomic_load_explicit(&rxq->umem->refcnt,
+ rte_memory_order_acquire) <= 1;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
@@ -1723,7 +1725,7 @@ struct msg_internal {
out_xsk:
xsk_socket__delete(rxq->xsk);
out_umem:
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1, rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
return ret;
--
1.8.3.1
* [PATCH v4 10/45] net/octeon_ep: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (8 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 09/45] net/af_xdp: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 11/45] net/octeontx: " Tyler Retzlaff
` (34 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +++--
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +++--
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx2_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx_ep_common.h | 4 ++--
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 ++++--
6 files changed, 20 insertions(+), 16 deletions(-)
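
The octeon_ep counters live in an ISM memzone that the hardware updates, so the patch types the cached pointers as RTE_ATOMIC(uint32_t) * and adds a __rte_atomic cast when carving them out of the raw buffer; the hot path then reads them with relaxed loads and issues its own barriers around the doorbell writes. A reduced sketch with hypothetical demo_queue names, not the real otx_ep structures:

    #include <stdint.h>
    #include <rte_stdatomic.h>

    struct demo_queue {
        RTE_ATOMIC(uint32_t) *cnt_ism; /* word the device writes into */
        uint32_t cnt_prev;             /* last value consumed by SW */
    };

    static inline void
    demo_attach_ism(struct demo_queue *q, void *ism_base, uint32_t off)
    {
        q->cnt_ism = (uint32_t __rte_atomic *)((uint8_t *)ism_base + off);
        *q->cnt_ism = 0;
        q->cnt_prev = 0;
    }

    /* return how many new events the device has reported since the
     * last call; a relaxed load is enough for a monotonic counter */
    static inline uint32_t
    demo_new_events(struct demo_queue *q)
    {
        uint32_t val = rte_atomic_load_explicit(q->cnt_ism,
                                                rte_memory_order_relaxed);
        uint32_t delta = val - q->cnt_prev;

        q->cnt_prev = val;
        return delta;
    }
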
diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.h b/drivers/net/octeon_ep/cnxk_ep_rx.h
index ecf95cd..9422042 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.h
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.h
@@ -98,7 +98,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(droq->pkts_sent_ism, rte_memory_order_relaxed);
new_pkts = val - droq->pkts_sent_prev;
droq->pkts_sent_prev = val;
@@ -111,7 +111,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_tx.c b/drivers/net/octeon_ep/cnxk_ep_tx.c
index 233c8aa..e093140 100644
--- a/drivers/net/octeon_ep/cnxk_ep_tx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_tx.c
@@ -15,7 +15,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(iq->inst_cnt_ism, rte_memory_order_relaxed);
iq->inst_cnt += val - iq->inst_cnt_prev;
iq->inst_cnt_prev = val;
@@ -27,7 +27,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 39f357e..39b28de 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -150,10 +150,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
- (void *)iq->inst_cnt_ism, ism_addr);
+ (void *)(uintptr_t)iq->inst_cnt_ism, ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
@@ -235,10 +235,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
- oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ oq_no, (void *)(uintptr_t)droq->pkts_sent_ism, ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 25e0e5a..2aeebb4 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -300,10 +300,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
- (void *)iq->inst_cnt_ism,
+ (void *)(uintptr_t)iq->inst_cnt_ism,
(unsigned int)ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
@@ -386,10 +386,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
- (void *)droq->pkts_sent_ism,
+ (void *)(uintptr_t)droq->pkts_sent_ism,
(unsigned int)ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 7776940..73eb0c9 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -218,7 +218,7 @@ struct otx_ep_iq_config {
*/
struct otx_ep_instr_queue {
/* Location in memory updated by SDP ISM */
- uint32_t *inst_cnt_ism;
+ RTE_ATOMIC(uint32_t) *inst_cnt_ism;
struct rte_mbuf **mbuf_list;
/* Pointer to the Virtual Base addr of the input ring. */
uint8_t *base_addr;
@@ -413,7 +413,7 @@ struct otx_ep_droq {
uint8_t ism_ena;
/* Pointer to host memory copy of output packet count, set by ISM */
- uint32_t *pkts_sent_ism;
+ RTE_ATOMIC(uint32_t) *pkts_sent_ism;
uint32_t pkts_sent_prev;
/* Statistics for this DROQ. */
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 59144e0..eb2d8c1 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -475,7 +475,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
@@ -871,7 +872,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
--
1.8.3.1
* [PATCH v4 11/45] net/octeontx: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (9 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 10/45] net/octeon_ep: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 12/45] net/cxgbe: " Tyler Retzlaff
` (33 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/octeontx/octeontx_ethdev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
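
octeontx keeps a global count of ethdev ports using the shared event device and only stops and closes that device when the last port goes away. The one idiom worth noting is that the fetch ops return the pre-update value, hence the "- 1 == 0" test in the close path. A tiny sketch with a hypothetical demo_evdev_users counter:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    static RTE_ATOMIC(uint16_t) demo_evdev_users;

    static inline void
    demo_evdev_get(void)
    {
        rte_atomic_fetch_add_explicit(&demo_evdev_users, 1,
                                      rte_memory_order_acquire);
    }

    /* true when this call removed the last user and the shared event
     * device may now be stopped */
    static inline bool
    demo_evdev_put_last(void)
    {
        return rte_atomic_fetch_sub_explicit(&demo_evdev_users, 1,
                                             rte_memory_order_acquire) - 1 == 0;
    }
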
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index e397136..3c21540 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -31,7 +31,7 @@
/* Useful in stopping/closing event device if no of
* eth ports are using it.
*/
-uint16_t evdev_refcnt;
+RTE_ATOMIC(uint16_t) evdev_refcnt;
#define OCTEONTX_QLM_MODE_SGMII 7
#define OCTEONTX_QLM_MODE_XFI 12
@@ -559,7 +559,7 @@ enum octeontx_link_speed {
return 0;
/* Stopping/closing event device once all eth ports are closed. */
- if (__atomic_fetch_sub(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&evdev_refcnt, 1, rte_memory_order_acquire) - 1 == 0) {
rte_event_dev_stop(nic->evdev);
rte_event_dev_close(nic->evdev);
}
@@ -1593,7 +1593,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
nic->pko_vfid = pko_vfid;
nic->port_id = port;
nic->evdev = evdev;
- __atomic_fetch_add(&evdev_refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&evdev_refcnt, 1, rte_memory_order_acquire);
res = octeontx_port_open(nic);
if (res < 0)
@@ -1844,7 +1844,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
}
}
- __atomic_store_n(&evdev_refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&evdev_refcnt, 0, rte_memory_order_release);
/*
* Do 1:1 links for ports & queues. All queues would be mapped to
* one port. If there are more ports than queues, then some ports
--
1.8.3.1
* [PATCH v4 12/45] net/cxgbe: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (10 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 11/45] net/octeontx: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 13/45] net/gve: " Tyler Retzlaff
` (32 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/cxgbe/clip_tbl.c | 12 ++++++------
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 ++++++++++----------
drivers/net/cxgbe/cxgbe_ofld.h | 6 +++---
drivers/net/cxgbe/l2t.c | 12 ++++++------
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +++++++++++----------
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 ++++++------
drivers/net/cxgbe/smt.h | 2 +-
10 files changed, 46 insertions(+), 45 deletions(-)
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
index b709e26..8588b88 100644
--- a/drivers/net/cxgbe/clip_tbl.c
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -55,7 +55,7 @@ void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
int ret;
t4_os_lock(&ce->lock);
- if (__atomic_fetch_sub(&ce->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ce->refcnt, 1, rte_memory_order_relaxed) - 1 == 0) {
ret = clip6_release_mbox(dev, ce->addr);
if (ret)
dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
@@ -79,7 +79,7 @@ static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
unsigned int clipt_size = c->clipt_size;
for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -114,12 +114,12 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce = find_or_alloc_clipe(ctbl, lip);
if (ce) {
t4_os_lock(&ce->lock);
- if (__atomic_load_n(&ce->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&ce->refcnt, rte_memory_order_relaxed) == 0) {
rte_memcpy(ce->addr, lip, sizeof(ce->addr));
if (v6) {
ce->type = FILTER_TYPE_IPV6;
- __atomic_store_n(&ce->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ce->refcnt, 1,
+ rte_memory_order_relaxed);
ret = clip6_get_mbox(dev, lip);
if (ret)
dev_debug(adap,
@@ -129,7 +129,7 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce->type = FILTER_TYPE_IPV4;
}
} else {
- __atomic_fetch_add(&ce->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ce->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&ce->lock);
}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
index 3b2be66..439fcf6 100644
--- a/drivers/net/cxgbe/clip_tbl.h
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -13,7 +13,7 @@ struct clip_entry {
enum filter_type type; /* entry type */
u32 addr[4]; /* IPV4 or IPV6 address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct clip_tbl {
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index c479454..2ed21f2 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -418,15 +418,15 @@ void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- __atomic_fetch_sub(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
}
@@ -448,15 +448,15 @@ void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
t->tid_tab[tid] = data;
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
- __atomic_fetch_add(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index 7a4e30d..fd1e7d8 100644
--- a/drivers/net/cxgbe/cxgbe_ofld.h
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -60,10 +60,10 @@ struct tid_info {
unsigned int atids_in_use;
/* TIDs in the TCAM */
- u32 tids_in_use;
+ RTE_ATOMIC(u32) tids_in_use;
/* TIDs in the HASH */
- u32 hash_tids_in_use;
- u32 conns_in_use;
+ RTE_ATOMIC(u32) hash_tids_in_use;
+ RTE_ATOMIC(u32) conns_in_use;
alignas(RTE_CACHE_LINE_SIZE) rte_spinlock_t atid_lock;
rte_spinlock_t ftid_lock;
diff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c
index 21f4019..ecb5fec 100644
--- a/drivers/net/cxgbe/l2t.c
+++ b/drivers/net/cxgbe/l2t.c
@@ -14,8 +14,8 @@
*/
void cxgbe_l2t_release(struct l2t_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -112,7 +112,7 @@ static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
struct l2t_entry *first_free = NULL;
for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -151,18 +151,18 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)
dev_debug(adap, "Failed to write L2T entry: %d",
ret);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
diff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h
index e4c0ebe..67d0197 100644
--- a/drivers/net/cxgbe/l2t.h
+++ b/drivers/net/cxgbe/l2t.h
@@ -30,7 +30,7 @@ struct l2t_entry {
u8 lport; /* destination port */
u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct l2t_data {
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
index 8e0da9c..79a7daa 100644
--- a/drivers/net/cxgbe/mps_tcam.c
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -76,7 +76,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- __atomic_fetch_add(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
@@ -98,7 +98,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
entry = &mpstcam->entry[ret];
memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
if (cxgbe_update_free_idx(mpstcam))
@@ -147,7 +147,7 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
* provided value is -1
*/
if (entry->state == MPS_ENTRY_UNUSED) {
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
}
@@ -165,7 +165,7 @@ static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_UNUSED;
}
@@ -190,12 +190,13 @@ int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
return -EINVAL;
}
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
entry->mask, idx, 1, pi->port_id,
false);
else
- ret = __atomic_fetch_sub(&entry->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&entry->refcnt, 1,
+ rte_memory_order_relaxed) - 1;
if (ret == 0) {
reset_mpstcam_entry(entry);
@@ -222,7 +223,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
goto out_unlock;
ret = t4_alloc_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -231,7 +232,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
@@ -253,7 +254,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) != 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) != 1)
goto out_unlock;
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -262,7 +263,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
diff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h
index 363786b..4b421f7 100644
--- a/drivers/net/cxgbe/mps_tcam.h
+++ b/drivers/net/cxgbe/mps_tcam.h
@@ -29,7 +29,7 @@ struct mps_tcam_entry {
u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
};
struct mpstcam_table {
diff --git a/drivers/net/cxgbe/smt.c b/drivers/net/cxgbe/smt.c
index 4e14a73..2f961c1 100644
--- a/drivers/net/cxgbe/smt.c
+++ b/drivers/net/cxgbe/smt.c
@@ -119,7 +119,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end, *first_free = NULL;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -156,7 +156,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_smte(s, smac);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->pfvf = pfvf;
rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
ret = write_smt_entry(dev, e);
@@ -168,9 +168,9 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
goto out_write_unlock;
}
e->state = SMT_STATE_SWITCHING;
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
@@ -195,8 +195,8 @@ struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
void cxgbe_smt_release(struct smt_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/smt.h b/drivers/net/cxgbe/smt.h
index 531810e..8b378ae 100644
--- a/drivers/net/cxgbe/smt.h
+++ b/drivers/net/cxgbe/smt.h
@@ -23,7 +23,7 @@ struct smt_entry {
u16 pfvf;
u16 hw_idx;
u8 src_mac[RTE_ETHER_ADDR_LEN];
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
rte_spinlock_t lock;
};
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 13/45] net/gve: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (11 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 12/45] net/cxgbe: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 14/45] net/memif: " Tyler Retzlaff
` (31 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
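Note: the only atomic use in gve is a relaxed counter that generates unique memzone names. A hedged sketch of the same idiom outside the driver; next_id and build_unique_name are illustrative names, only the gve_dma_%u format string comes from the patch.
#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>
/* Shared ID source; only uniqueness matters, so relaxed ordering is enough. */
static RTE_ATOMIC(uint16_t) next_id;
static void
build_unique_name(char *buf, size_t len)
{
        uint16_t id = rte_atomic_fetch_add_explicit(&next_id, 1,
                        rte_memory_order_relaxed);
        snprintf(buf, len, "gve_dma_%u", id);
}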
drivers/net/gve/base/gve_osdep.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/gve/base/gve_osdep.h b/drivers/net/gve/base/gve_osdep.h
index a3702f4..c0ee0d5 100644
--- a/drivers/net/gve/base/gve_osdep.h
+++ b/drivers/net/gve/base/gve_osdep.h
@@ -135,7 +135,7 @@ struct gve_dma_mem {
static inline void *
gve_alloc_dma_mem(struct gve_dma_mem *mem, u64 size)
{
- static uint16_t gve_dma_memzone_id;
+ static RTE_ATOMIC(uint16_t) gve_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -143,7 +143,7 @@ struct gve_dma_mem {
return NULL;
snprintf(z_name, sizeof(z_name), "gve_dma_%u",
- __atomic_fetch_add(&gve_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&gve_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG,
PAGE_SIZE);
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 14/45] net/memif: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (12 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 13/45] net/gve: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 15/45] net/thunderx: " Tyler Retzlaff
` (30 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
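Note: memif is the most ordering-sensitive conversion in this batch. The ring head and tail are written by one process and read by the other, so the release stores must keep pairing with the acquire loads exactly as the __atomic versions did. A minimal single-producer/single-consumer sketch of that pairing, assuming a power-of-two ring; ring_t, produce and consume are illustrative names, not memif code.
#include <stddef.h>
#include <stdint.h>
#include <rte_stdatomic.h>
#define RING_SIZE 256
#define RING_MASK (RING_SIZE - 1)
typedef struct {
        RTE_ATOMIC(uint16_t) head; /* written by the producer */
        RTE_ATOMIC(uint16_t) tail; /* written by the consumer */
        void *slot[RING_SIZE];
} ring_t;
/* Producer: the release store of head makes the slot write visible
 * before the consumer can observe the new head.
 */
static inline int
produce(ring_t *r, void *obj)
{
        uint16_t head = rte_atomic_load_explicit(&r->head, rte_memory_order_relaxed);
        uint16_t tail = rte_atomic_load_explicit(&r->tail, rte_memory_order_acquire);
        if ((uint16_t)(head - tail) >= RING_SIZE)
                return -1; /* full */
        r->slot[head & RING_MASK] = obj;
        rte_atomic_store_explicit(&r->head, head + 1, rte_memory_order_release);
        return 0;
}
/* Consumer: the acquire load of head pairs with the producer's release
 * store, so the slot contents are guaranteed to be visible.
 */
static inline void *
consume(ring_t *r)
{
        uint16_t tail = rte_atomic_load_explicit(&r->tail, rte_memory_order_relaxed);
        uint16_t head = rte_atomic_load_explicit(&r->head, rte_memory_order_acquire);
        void *obj;
        if (tail == head)
                return NULL; /* empty */
        obj = r->slot[tail & RING_MASK];
        rte_atomic_store_explicit(&r->tail, tail + 1, rte_memory_order_release);
        return obj;
}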
drivers/net/memif/memif.h | 4 ++--
drivers/net/memif/rte_eth_memif.c | 50 +++++++++++++++++++--------------------
2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/net/memif/memif.h b/drivers/net/memif/memif.h
index f5a4693..3f5b407 100644
--- a/drivers/net/memif/memif.h
+++ b/drivers/net/memif/memif.h
@@ -169,9 +169,9 @@ typedef struct __rte_packed __rte_aligned(128)
uint32_t cookie; /**< MEMIF_COOKIE */
uint16_t flags; /**< flags */
#define MEMIF_RING_FLAG_MASK_INT 1 /**< disable interrupt mode */
- uint16_t head; /**< pointer to ring buffer head */
+ RTE_ATOMIC(uint16_t) head; /**< pointer to ring buffer head */
MEMIF_CACHELINE_ALIGN_MARK(cacheline1);
- uint16_t tail; /**< pointer to ring buffer tail */
+ RTE_ATOMIC(uint16_t) tail; /**< pointer to ring buffer tail */
MEMIF_CACHELINE_ALIGN_MARK(cacheline2);
memif_desc_t desc[0]; /**< buffer descriptors */
} memif_ring_t;
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 18377d9..16da22b 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -262,7 +262,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* in function eth_memif_rx for C2S queues.
*/
- cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ cur_tail = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
while (mq->last_tail != cur_tail) {
RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);
@@ -334,10 +334,10 @@ struct mp_region_msg {
if (type == MEMIF_RING_C2S) {
cur_slot = mq->last_head;
- last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire);
} else {
cur_slot = mq->last_tail;
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
}
if (cur_slot == last_slot)
@@ -473,7 +473,7 @@ struct mp_region_msg {
no_free_bufs:
if (type == MEMIF_RING_C2S) {
- __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, cur_slot, rte_memory_order_release);
mq->last_head = cur_slot;
} else {
mq->last_tail = cur_slot;
@@ -485,7 +485,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
while (n_slots--) {
@@ -493,7 +493,7 @@ struct mp_region_msg {
d0 = &ring->desc[s0];
d0->length = pmd->run.pkt_buffer_size;
}
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
}
mq->n_pkts += n_rx_pkts;
@@ -541,7 +541,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* to synchronize it between threads.
*/
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
if (cur_slot == last_slot)
goto refill;
n_slots = last_slot - cur_slot;
@@ -591,7 +591,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
if (n_slots < 32)
@@ -620,7 +620,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_tx.
*/
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
mq->n_pkts += n_rx_pkts;
@@ -668,9 +668,9 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot +
- __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
} else {
/* For S2C queues ring->tail is updated by the sender and
* this function is called in the context of sending thread.
@@ -678,8 +678,8 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
- n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
+ slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_relaxed);
+ n_free = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire) - slot;
}
uint16_t i;
@@ -792,9 +792,9 @@ struct mp_region_msg {
no_free_slots:
if (type == MEMIF_RING_C2S)
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
else
- __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, slot, rte_memory_order_release);
if (((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) &&
(rte_intr_fd_get(mq->intr_handle) >= 0)) {
@@ -882,7 +882,7 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot + mq->last_tail;
int used_slots;
@@ -942,7 +942,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_rx for C2S rings.
*/
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
/* Send interrupt, if enabled. */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
@@ -1155,8 +1155,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_c2s_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1175,8 +1175,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_s2c_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1314,8 +1314,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
@@ -1330,8 +1330,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 15/45] net/thunderx: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (13 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 14/45] net/memif: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 16/45] net/virtio: " Tyler Retzlaff
` (29 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
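Note: one wrinkle worth calling out is that rte_wait_until_equal_32() still takes a plain uint32_t pointer, so the now-atomic rbdr->tail has to be cast as in the hunk below. A hedged sketch of the same claim-then-wait-for-turn idiom; next_ticket, turn and ordered_section are illustrative names, not driver code.
#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>
static RTE_ATOMIC(uint32_t) next_ticket; /* next slot to hand out */
static RTE_ATOMIC(uint32_t) turn;        /* slot currently allowed to run */
static void
ordered_section(void)
{
        /* Claim a slot; relaxed is enough, ordering comes from the wait. */
        uint32_t my = rte_atomic_fetch_add_explicit(&next_ticket, 1,
                        rte_memory_order_relaxed);
        /* Spin until it is this slot's turn. The cast drops the atomic
         * qualifier because rte_wait_until_equal_32() takes uint32_t *,
         * just as done for rbdr->tail in this patch.
         */
        rte_wait_until_equal_32((uint32_t *)(uintptr_t)&turn, my,
                        rte_memory_order_acquire);
        /* ... work that must run in ticket order goes here ... */
        /* Hand the turn to the next claimant; release publishes the work. */
        rte_atomic_store_explicit(&turn, my + 1, rte_memory_order_release);
}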
drivers/net/thunderx/nicvf_rxtx.c | 9 +++++----
drivers/net/thunderx/nicvf_struct.h | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 74f43b9..76b6fdb 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -374,8 +374,8 @@ static const alignas(RTE_CACHE_LINE_SIZE) uint32_t ptype_table[16][16] = {
NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
(nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));
- next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
- __ATOMIC_ACQUIRE);
+ next_tail = rte_atomic_fetch_add_explicit(&rbdr->next_tail, to_fill,
+ rte_memory_order_acquire);
ltail = next_tail;
for (i = 0; i < to_fill; i++) {
struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);
@@ -385,9 +385,10 @@ static const alignas(RTE_CACHE_LINE_SIZE) uint32_t ptype_table[16][16] = {
ltail++;
}
- rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&rbdr->tail, next_tail,
+ rte_memory_order_relaxed);
- __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&rbdr->tail, ltail, rte_memory_order_release);
nicvf_addr_write(door, to_fill);
return to_fill;
}
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index cfcd942..60d3ec0 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -20,8 +20,8 @@ struct __rte_cache_aligned nicvf_rbdr {
struct rbdr_entry_t *desc;
nicvf_iova_addr_t phys;
uint32_t buffsz;
- uint32_t tail;
- uint32_t next_tail;
+ RTE_ATOMIC(uint32_t) tail;
+ RTE_ATOMIC(uint32_t) next_tail;
uint32_t head;
uint32_t qlen_mask;
};
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 16/45] net/virtio: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (14 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 15/45] net/thunderx: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 17/45] net/hinic: " Tyler Retzlaff
` (28 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
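Note: the virtio changes are mostly fence and flag conversions; the ordering requirement is that a descriptor's payload become visible before its flags. A minimal sketch of that publish step under the new API, assuming a simplified demo_desc layout (the real driver additionally special-cases Arm versus x86 as shown in the diff).
#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>
/* Simplified descriptor: the device treats the rest of the fields as
 * valid only once it sees the flags update.
 */
struct demo_desc {
        uint64_t addr;
        uint32_t len;
        RTE_ATOMIC(uint16_t) flags;
};
static inline void
desc_publish(struct demo_desc *d, uint64_t addr, uint32_t len, uint16_t flags,
                int weak_barriers)
{
        d->addr = addr;
        d->len = len;
        if (weak_barriers) {
                /* Release store: addr/len become visible before flags. */
                rte_atomic_store_explicit(&d->flags, flags,
                                rte_memory_order_release);
        } else {
                /* Strongly ordered (I/O memory) path keeps the barrier. */
                rte_io_wmb();
                d->flags = flags;
        }
}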
drivers/net/virtio/virtio_ring.h | 4 +--
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 ++++-----
drivers/net/virtio/virtqueue.h | 32 ++++++++++++------------
3 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b..2a25751 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -59,7 +59,7 @@ struct vring_used_elem {
struct vring_used {
uint16_t flags;
- uint16_t idx;
+ RTE_ATOMIC(uint16_t) idx;
struct vring_used_elem ring[];
};
@@ -70,7 +70,7 @@ struct vring_packed_desc {
uint64_t addr;
uint32_t len;
uint16_t id;
- uint16_t flags;
+ RTE_ATOMIC(uint16_t) flags;
};
#define RING_EVENT_FLAGS_ENABLE 0x0
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4fdfe70..24e2b2c 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -948,7 +948,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+ uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -1037,8 +1037,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vring->desc[vq->used_idx].flags, flags,
+ rte_memory_order_release);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {
@@ -1057,9 +1057,9 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
struct vring *vring = &dev->vrings.split[queue_idx];
/* Consume avail ring, using used ring idx as first one */
- while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ while (rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
!= vring->avail->idx) {
- avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ avail_idx = rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
& (vring->num - 1);
desc_idx = vring->avail->ring[avail_idx];
@@ -1070,7 +1070,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
uep->id = desc_idx;
uep->len = n_descs;
- __atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&vring->used->idx, 1, rte_memory_order_relaxed);
}
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 75d70f1..60211a4 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -37,7 +37,7 @@
virtio_mb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
else
rte_mb();
}
@@ -46,7 +46,7 @@
virtio_rmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
else
rte_io_rmb();
}
@@ -55,7 +55,7 @@
virtio_wmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
else
rte_io_wmb();
}
@@ -67,12 +67,12 @@
uint16_t flags;
if (weak_barriers) {
-/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports
+/* x86 prefers to using rte_io_rmb over rte_atomic_load_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+ flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
#else
flags = dp->flags;
rte_io_rmb();
@@ -90,12 +90,12 @@
uint16_t flags, uint8_t weak_barriers)
{
if (weak_barriers) {
-/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports
+/* x86 prefers to using rte_io_wmb over rte_atomic_store_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
#else
rte_io_wmb();
dp->flags = flags;
@@ -425,7 +425,7 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
if (vq->hw->weak_barriers) {
/**
- * x86 prefers to using rte_smp_rmb over __atomic_load_n as it
+ * x86 prefers to using rte_smp_rmb over rte_atomic_load_explicit as it
* reports a slightly better perf, which comes from the saved
* branch by the compiler.
* The if and else branches are identical with the smp and io
@@ -435,8 +435,8 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
idx = vq->vq_split.ring.used->idx;
rte_smp_rmb();
#else
- idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
- __ATOMIC_ACQUIRE);
+ idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,
+ rte_memory_order_acquire);
#endif
} else {
idx = vq->vq_split.ring.used->idx;
@@ -454,7 +454,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
vq_update_avail_idx(struct virtqueue *vq)
{
if (vq->hw->weak_barriers) {
- /* x86 prefers to using rte_smp_wmb over __atomic_store_n as
+ /* x86 prefers to using rte_smp_wmb over rte_atomic_store_explicit as
* it reports a slightly better perf, which comes from the
* saved branch by the compiler.
* The if and else branches are identical with the smp and
@@ -464,8 +464,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
rte_smp_wmb();
vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
#else
- __atomic_store_n(&vq->vq_split.ring.avail->idx,
- vq->vq_avail_idx, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,
+ vq->vq_avail_idx, rte_memory_order_release);
#endif
} else {
rte_io_wmb();
@@ -528,8 +528,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
- used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
- __ATOMIC_RELAXED); \
+ used_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \
+ rte_memory_order_relaxed); \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
if (virtio_with_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
@@ -546,7 +546,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
" avail.flags=0x%x; used.flags=0x%x", \
(vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
(vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
- __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+ rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \
(vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 17/45] net/hinic: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (15 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 16/45] net/virtio: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 18/45] net/idpf: " Tyler Retzlaff
` (27 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
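Note: the status word is DMA-written by the NIC after the rest of the CQE, so the driver's poll must keep using a load-acquire. A hedged sketch of that poll, with an illustrative demo_cqe layout and DEMO_CQE_DONE bit rather than the hardware's real encoding.
#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
/* Simplified completion entry: hardware writes the payload first and
 * the done bit in status last.
 */
struct demo_cqe {
        RTE_ATOMIC(uint32_t) status;
        uint32_t len;
};
#define DEMO_CQE_DONE 0x1u
static inline bool
cqe_poll(struct demo_cqe *cqe, uint32_t *len)
{
        /* Acquire orders the later read of len after the done bit. */
        uint32_t status = rte_atomic_load_explicit(&cqe->status,
                        rte_memory_order_acquire);
        if (!(status & DEMO_CQE_DONE))
                return false;
        *len = cqe->len;
        return true;
}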
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 7adb6e3..c2cd295 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -1004,7 +1004,7 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
while (pkts < nb_pkts) {
/* 2. current ci is done */
rx_cqe = &rxq->rx_cqe[sw_ci];
- status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
+ status = rte_atomic_load_explicit(&rx_cqe->status, rte_memory_order_acquire);
if (!HINIC_GET_RX_DONE_BE(status))
break;
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 2dde3ec..43c236b 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -33,7 +33,7 @@ struct __rte_cache_aligned hinic_rq_cqe {
#else
struct hinic_rq_cqe {
#endif
- u32 status;
+ RTE_ATOMIC(u32) status;
u32 vlan_len;
u32 offload_type;
u32 rss_hash;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 18/45] net/idpf: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (16 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 17/45] net/hinic: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 19/45] net/qede: " Tyler Retzlaff
` (26 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
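Note: these are plain statistics counters, bumped on the data path and summed or reset from control-plane callbacks, so relaxed ordering is all that is needed. A minimal sketch of the pattern; demo_rxq, rxq_count_alloc_failure and rxq_stats_sum are illustrative names, not driver code.
#include <stdint.h>
#include <rte_stdatomic.h>
struct demo_rxq {
        RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
/* Data-path update: only atomicity is needed, not ordering. */
static inline void
rxq_count_alloc_failure(struct demo_rxq *rxq)
{
        rte_atomic_fetch_add_explicit(&rxq->mbuf_alloc_failed, 1,
                        rte_memory_order_relaxed);
}
/* Control-plane read: sum the per-queue counters with relaxed loads. */
static inline uint64_t
rxq_stats_sum(struct demo_rxq *qs, unsigned int nb_q)
{
        uint64_t total = 0;
        unsigned int i;
        for (i = 0; i < nb_q; i++)
                total += rte_atomic_load_explicit(&qs[i].mbuf_alloc_failed,
                                rte_memory_order_relaxed);
        return total;
}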
drivers/net/idpf/idpf_ethdev.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 86151c9..1df4d6b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -259,8 +259,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed += rte_atomic_load_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -308,7 +308,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&rxq->rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 19/45] net/qede: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (17 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 18/45] net/idpf: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 20/45] net/ring: " Tyler Retzlaff
` (25 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
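Note: ref_cnt gates a shared table that is allocated on the first reference and freed on the last; relaxed ordering is enough here because, as in the driver, callers are already serialized by the probe/remove path. A hedged sketch of that first/last-reference idiom with illustrative shared_ctx_get/shared_ctx_put names.
#include <stdint.h>
#include <stdlib.h>
#include <rte_stdatomic.h>
static RTE_ATOMIC(uint32_t) ref_cnt;
static void *shared_ctx;
/* First caller allocates the shared context, later callers only bump
 * the count; concurrent setup is assumed to be serialized externally.
 */
static int
shared_ctx_get(void)
{
        if (rte_atomic_fetch_add_explicit(&ref_cnt, 1,
                        rte_memory_order_relaxed) == 0) {
                shared_ctx = calloc(1, 4096);
                if (shared_ctx == NULL)
                        return -1;
        }
        return 0;
}
/* Last caller frees it again. */
static void
shared_ctx_put(void)
{
        if (rte_atomic_fetch_sub_explicit(&ref_cnt, 1,
                        rte_memory_order_relaxed) - 1 == 0) {
                free(shared_ctx);
                shared_ctx = NULL;
        }
}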
drivers/net/qede/base/bcm_osal.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2edeb38..abd1186 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -51,11 +51,11 @@ void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
/* Counter to track current memzone allocated */
static uint16_t ecore_mz_count;
-static uint32_t ref_cnt;
+static RTE_ATOMIC(uint32_t) ref_cnt;
int ecore_mz_mapping_alloc(void)
{
- if (__atomic_fetch_add(&ref_cnt, 1, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_fetch_add_explicit(&ref_cnt, 1, rte_memory_order_relaxed) == 0) {
ecore_mz_mapping = rte_calloc("ecore_mz_map",
rte_memzone_max_get(), sizeof(struct rte_memzone *), 0);
}
@@ -68,7 +68,7 @@ int ecore_mz_mapping_alloc(void)
void ecore_mz_mapping_free(void)
{
- if (__atomic_fetch_sub(&ref_cnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ref_cnt, 1, rte_memory_order_relaxed) - 1 == 0) {
rte_free(ecore_mz_mapping);
ecore_mz_mapping = NULL;
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 20/45] net/ring: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (18 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 19/45] net/qede: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 21/45] vdpa/mlx5: " Tyler Retzlaff
` (24 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
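Note: only the multi-producer/multi-consumer configuration needs the atomic add; the single-threaded configuration keeps a direct increment, mirroring the diff. A minimal sketch of that split; demo_queue_stats and count_pkts are illustrative names (when stdatomic is enforced the direct increment still compiles to an atomic read-modify-write on the RTE_ATOMIC() field).
#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
struct demo_queue_stats {
        RTE_ATOMIC(uint64_t) pkts;
};
static inline void
count_pkts(struct demo_queue_stats *s, uint64_t n, bool single_thread)
{
        if (single_thread)
                s->pkts += n; /* queue used from one thread only */
        else
                rte_atomic_fetch_add_explicit(&s->pkts, n,
                                rte_memory_order_relaxed);
}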
drivers/net/ring/rte_eth_ring.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 48953dd..b16f5d5 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -44,8 +44,8 @@ enum dev_action {
struct ring_queue {
struct rte_ring *rng;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_internals {
@@ -82,7 +82,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts += nb_rx;
else
- __atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->rx_pkts, nb_rx, rte_memory_order_relaxed);
return nb_rx;
}
@@ -96,7 +96,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SP_ENQ)
r->tx_pkts += nb_tx;
else
- __atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->tx_pkts, nb_tx, rte_memory_order_relaxed);
return nb_tx;
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 21/45] vdpa/mlx5: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (19 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 20/45] net/ring: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 22/45] raw/ifpga: " Tyler Retzlaff
` (23 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
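Note: remaining_cnt and err_cnt track bulk tasks handed to the configuration thread: the submitter bumps the remaining count per task, each worker decrements it and bumps the error count on failure, and the submitter polls until it reaches zero. A hedged sketch of that bookkeeping with illustrative task_submitted/task_done/tasks_wait helpers, not the driver's own functions.
#include <stdbool.h>
#include <stdint.h>
#include <rte_cycles.h>
#include <rte_stdatomic.h>
static inline void
task_submitted(RTE_ATOMIC(uint32_t) *remaining)
{
        rte_atomic_fetch_add_explicit(remaining, 1, rte_memory_order_relaxed);
}
static inline void
task_done(RTE_ATOMIC(uint32_t) *remaining, RTE_ATOMIC(uint32_t) *errors,
                bool failed)
{
        if (failed)
                rte_atomic_fetch_add_explicit(errors, 1,
                                rte_memory_order_relaxed);
        rte_atomic_fetch_sub_explicit(remaining, 1, rte_memory_order_relaxed);
}
/* Submitter side: poll until every task reported back, then check errors. */
static inline bool
tasks_wait(RTE_ATOMIC(uint32_t) *remaining, RTE_ATOMIC(uint32_t) *errors,
                uint32_t sleep_us)
{
        while (rte_atomic_load_explicit(remaining,
                        rte_memory_order_relaxed) != 0)
                rte_delay_us_sleep(sleep_us);
        return rte_atomic_load_explicit(errors,
                        rte_memory_order_relaxed) == 0;
}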
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +++++++++---------
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +++++------
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 ++-
6 files changed, 52 insertions(+), 44 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@
uint32_t timeout = 0;
/* Check and wait all close tasks done. */
- while (__atomic_load_n(&priv->dev_close_progress,
- __ATOMIC_RELAXED) != 0 && timeout < 1000) {
+ while (rte_atomic_load_explicit(&priv->dev_close_progress,
+ rte_memory_order_relaxed) != 0 && timeout < 1000) {
rte_delay_us_sleep(10000);
timeout++;
}
@@ -294,8 +294,8 @@
priv->last_c_thrd_idx = 0;
else
priv->last_c_thrd_idx++;
- __atomic_store_n(&priv->dev_close_progress,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress,
+ 1, rte_memory_order_relaxed);
if (mlx5_vdpa_task_add(priv,
priv->last_c_thrd_idx,
MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+ rte_memory_order_relaxed);
priv->state = MLX5_VDPA_STATE_PROBED;
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
@@ -664,7 +664,9 @@
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t max_queues, index, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
@@ -847,8 +849,8 @@
if (conf_thread_mng.initializer_priv == priv)
if (mlx5_vdpa_mult_threads_create())
goto error;
- __atomic_fetch_add(&conf_thread_mng.refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+ rte_memory_order_relaxed);
}
if (mlx5_vdpa_create_dev_resources(priv))
goto error;
@@ -937,8 +939,8 @@
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
if (priv->use_c_thread)
- if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
- 1, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+ 1, rte_memory_order_relaxed) == 1)
mlx5_vdpa_mult_threads_destroy(true);
rte_free(priv);
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 4ce6977..e156520 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
struct __rte_aligned(4) mlx5_vdpa_task {
struct mlx5_vdpa_priv *priv;
enum mlx5_vdpa_task_type type;
- uint32_t *remaining_cnt;
- uint32_t *err_cnt;
+ RTE_ATOMIC(uint32_t) *remaining_cnt;
+ RTE_ATOMIC(uint32_t) *err_cnt;
uint32_t idx;
} __rte_packed;
@@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
struct mlx5_vdpa_conf_thread_mng {
void *initializer_priv;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
uint32_t max_thrds;
pthread_mutex_t cthrd_lock;
struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
@@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
uint16_t last_c_thrd_idx;
- uint16_t dev_close_progress;
+ RTE_ATOMIC(uint16_t) dev_close_progress;
uint16_t num_mrs; /* Number of memory regions. */
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
@@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num);
int
mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
void
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num)
{
struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@
return -1;
for (i = 0 ; i < num; i++)
if (task[i].remaining_cnt)
- __atomic_fetch_add(task[i].remaining_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+ rte_memory_order_relaxed);
/* wake up conf thread. */
pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@
}
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
{
/* Check and wait all tasks done. */
- while (__atomic_load_n(remaining_cnt,
- __ATOMIC_RELAXED) != 0) {
+ while (rte_atomic_load_explicit(remaining_cnt,
+ rte_memory_order_relaxed) != 0) {
rte_delay_us_sleep(sleep_time);
}
- if (__atomic_load_n(err_cnt,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(err_cnt,
+ rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Tasks done with error.");
return true;
}
@@ -137,8 +137,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to register mr %d.", task.idx);
- __atomic_fetch_add(task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+ rte_memory_order_relaxed);
}
break;
case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to setup virtq %d.", task.idx);
- __atomic_fetch_add(
- task.err_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ task.err_cnt, 1, rte_memory_order_relaxed);
}
virtq->enable = 1;
pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@
DRV_LOG(ERR,
"Failed to stop virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -176,9 +176,9 @@
DRV_LOG(ERR,
"Failed to get negotiated features virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -200,9 +200,9 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(
+ rte_atomic_store_explicit(
&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
break;
case MLX5_VDPA_TASK_PREPARE_VIRTQ:
ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@
DRV_LOG(ERR,
"Failed to prepare virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
break;
default:
@@ -222,8 +222,8 @@
break;
}
if (task.remaining_cnt)
- __atomic_fetch_sub(task.remaining_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+ 1, rte_memory_order_relaxed);
}
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@
uint8_t mode = 0;
int ret = -rte_errno;
uint32_t i, thrd_idx, data[1];
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
(priv->vid, &mode, &priv->vmem_info.size,
&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@
{
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
struct rte_vhost_vring vq;
--
1.8.3.1
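For readers new to the rte stdatomic names, here is a minimal sketch of the remaining/error counter pattern the converted vdpa code above relies on. All names are illustrative stand-ins rather than the driver's own symbols, and the real code sleeps between polls instead of spinning.

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    static RTE_ATOMIC(uint32_t) remaining;  /* outstanding tasks */
    static RTE_ATOMIC(uint32_t) errors;     /* failed tasks */

    static void
    task_submitted(void)
    {
        /* Relaxed ordering is enough: only the count matters. */
        rte_atomic_fetch_add_explicit(&remaining, 1, rte_memory_order_relaxed);
    }

    static void
    task_finished(bool failed)
    {
        if (failed)
            rte_atomic_fetch_add_explicit(&errors, 1, rte_memory_order_relaxed);
        rte_atomic_fetch_sub_explicit(&remaining, 1, rte_memory_order_relaxed);
    }

    static bool
    all_tasks_ok(void)
    {
        while (rte_atomic_load_explicit(&remaining, rte_memory_order_relaxed) != 0)
            ;   /* the driver calls rte_delay_us_sleep() here */
        return rte_atomic_load_explicit(&errors, rte_memory_order_relaxed) == 0;
    }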
* [PATCH v4 22/45] raw/ifpga: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (20 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 21/45] vdpa/mlx5: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 23/45] event/opdl: " Tyler Retzlaff
` (22 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
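For context, the hunks below keep the existing first-in/last-out reference-count idiom and only change its spelling. A minimal sketch of that idiom with the rte stdatomic API, using illustrative names rather than the driver's:

    #include <rte_stdatomic.h>

    static void start_monitor_thread(void) { /* placeholder for the real start */ }
    static void stop_monitor_thread(void) { /* placeholder for the real stop */ }

    static RTE_ATOMIC(int) monitor_refcnt;

    static void
    monitor_get(void)
    {
        /* fetch-add returns the old value: 0 means this is the first user,
         * so the monitor thread must be started. */
        if (rte_atomic_fetch_add_explicit(&monitor_refcnt, 1,
                rte_memory_order_relaxed) == 0)
            start_monitor_thread();
    }

    static void
    monitor_put(void)
    {
        /* fetch-sub also returns the old value: 1 means this was the last
         * user, so the monitor thread can be stopped. */
        if (rte_atomic_fetch_sub_explicit(&monitor_refcnt, 1,
                rte_memory_order_relaxed) == 1)
            stop_monitor_thread();
    }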
---
drivers/raw/ifpga/ifpga_rawdev.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c
index f89bd3f..78d3c88 100644
--- a/drivers/raw/ifpga/ifpga_rawdev.c
+++ b/drivers/raw/ifpga/ifpga_rawdev.c
@@ -73,7 +73,7 @@
static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
-static int ifpga_monitor_refcnt;
+static RTE_ATOMIC(int) ifpga_monitor_refcnt;
static rte_thread_t ifpga_monitor_start_thread;
static struct ifpga_rawdev *
@@ -512,7 +512,7 @@ static int set_surprise_link_check_aer(
int gsd_enable, ret;
#define MS 1000
- while (__atomic_load_n(&ifpga_monitor_refcnt, __ATOMIC_RELAXED)) {
+ while (rte_atomic_load_explicit(&ifpga_monitor_refcnt, rte_memory_order_relaxed)) {
gsd_enable = 0;
for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
ifpga_rdev = &ifpga_rawdevices[i];
@@ -549,7 +549,7 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 1;
- if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_fetch_add_explicit(&ifpga_monitor_refcnt, 1, rte_memory_order_relaxed)) {
ret = rte_thread_create_internal_control(&ifpga_monitor_start_thread,
"ifpga-mon", ifpga_rawdev_gsd_handle, NULL);
if (ret != 0) {
@@ -573,7 +573,8 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 0;
- if (!(__atomic_fetch_sub(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED) - 1) &&
+ if (!(rte_atomic_fetch_sub_explicit(&ifpga_monitor_refcnt, 1,
+ rte_memory_order_relaxed) - 1) &&
ifpga_monitor_start_thread.opaque_id != 0) {
ret = pthread_cancel((pthread_t)ifpga_monitor_start_thread.opaque_id);
if (ret)
--
1.8.3.1
* [PATCH v4 23/45] event/opdl: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (21 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 22/45] raw/ifpga: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 24/45] event/octeontx: " Tyler Retzlaff
` (21 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
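The most interesting hunk below is the claim of the shared head: __atomic_compare_exchange_n() with the "weak" flag becomes rte_atomic_compare_exchange_weak_explicit(), which drops the boolean argument and takes the desired value directly. A minimal sketch of that multi-thread claim loop (names are illustrative, not opdl's):

    #include <stdint.h>
    #include <rte_pause.h>
    #include <rte_stdatomic.h>

    static RTE_ATOMIC(uint32_t) head;   /* illustrative shared ring head */

    /* Claim num slots by advancing the shared head; returns the old head. */
    static uint32_t
    claim_slots(uint32_t num)
    {
        uint32_t old = rte_atomic_load_explicit(&head, rte_memory_order_acquire);

        /* The weak form may fail spuriously, hence the retry loop. On
         * failure 'old' is reloaded with the current head value. */
        while (!rte_atomic_compare_exchange_weak_explicit(&head, &old, old + num,
                rte_memory_order_release,   /* memory order on success */
                rte_memory_order_acquire))  /* memory order on failure */
            rte_pause();
        return old;
    }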
---
drivers/event/opdl/opdl_ring.c | 80 +++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index e87ffd5..3476f6b 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -47,12 +47,12 @@ struct __rte_cache_aligned shared_state {
/* Last known minimum sequence number of dependencies, used for multi
* thread operation
*/
- uint32_t available_seq;
+ RTE_ATOMIC(uint32_t) available_seq;
char _pad1[RTE_CACHE_LINE_SIZE * 3];
- uint32_t head; /* Head sequence number (for multi thread operation) */
+ RTE_ATOMIC(uint32_t) head; /* Head sequence number (for multi thread operation) */
char _pad2[RTE_CACHE_LINE_SIZE * 3];
struct opdl_stage *stage; /* back pointer */
- uint32_t tail; /* Tail sequence number */
+ RTE_ATOMIC(uint32_t) tail; /* Tail sequence number */
char _pad3[RTE_CACHE_LINE_SIZE * 2];
};
@@ -149,10 +149,10 @@ struct opdl_ring {
available(const struct opdl_stage *s)
{
if (s->threadsafe == true) {
- uint32_t n = __atomic_load_n(&s->shared.available_seq,
- __ATOMIC_ACQUIRE) -
- __atomic_load_n(&s->shared.head,
- __ATOMIC_ACQUIRE);
+ uint32_t n = rte_atomic_load_explicit(&s->shared.available_seq,
+ rte_memory_order_acquire) -
+ rte_atomic_load_explicit(&s->shared.head,
+ rte_memory_order_acquire);
/* Return 0 if available_seq needs to be updated */
return (n <= s->num_slots) ? n : 0;
@@ -168,7 +168,7 @@ struct opdl_ring {
{
uint32_t i;
uint32_t this_tail = s->shared.tail;
- uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+ uint32_t min_seq = rte_atomic_load_explicit(&s->deps[0]->tail, rte_memory_order_acquire);
/* Input stage sequence numbers are greater than the sequence numbers of
* its dependencies so an offset of t->num_slots is needed when
* calculating available slots and also the condition which is used to
@@ -179,16 +179,16 @@ struct opdl_ring {
if (is_input_stage(s)) {
wrap = s->num_slots;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((this_tail - seq) > (this_tail - min_seq))
min_seq = seq;
}
} else {
wrap = 0;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((seq - this_tail) < (min_seq - this_tail))
min_seq = seq;
}
@@ -197,8 +197,8 @@ struct opdl_ring {
if (s->threadsafe == false)
s->available_seq = min_seq + wrap;
else
- __atomic_store_n(&s->shared.available_seq, min_seq + wrap,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.available_seq, min_seq + wrap,
+ rte_memory_order_release);
}
/* Wait until the number of available slots reaches number requested */
@@ -298,7 +298,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -381,18 +381,18 @@ struct opdl_ring {
/* There should be no race condition here. If shared.tail
* matches, no other core can update it until this one does.
*/
- if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&s->shared.tail, rte_memory_order_acquire) ==
tail) {
if (num_entries >= (head - tail)) {
claim_mgr_remove(disclaims);
- __atomic_store_n(&s->shared.tail, head,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, head,
+ rte_memory_order_release);
num_entries -= (head - tail);
} else {
claim_mgr_move_tail(disclaims, num_entries);
- __atomic_store_n(&s->shared.tail,
+ rte_atomic_store_explicit(&s->shared.tail,
num_entries + tail,
- __ATOMIC_RELEASE);
+ rte_memory_order_release);
num_entries = 0;
}
} else if (block == false)
@@ -420,7 +420,7 @@ struct opdl_ring {
opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
false);
- *old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+ *old_head = rte_atomic_load_explicit(&s->shared.head, rte_memory_order_acquire);
while (true) {
bool success;
/* If called by opdl_ring_input(), claim does not need to be
@@ -440,11 +440,10 @@ struct opdl_ring {
if (*num_entries == 0)
return;
- success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+ success = rte_atomic_compare_exchange_weak_explicit(&s->shared.head, old_head,
*old_head + *num_entries,
- true, /* may fail spuriously */
- __ATOMIC_RELEASE, /* memory order on success */
- __ATOMIC_ACQUIRE); /* memory order on fail */
+ rte_memory_order_release, /* memory order on success */
+ rte_memory_order_acquire); /* memory order on fail */
if (likely(success))
break;
rte_pause();
@@ -472,10 +471,11 @@ struct opdl_ring {
/* If another thread started inputting before this one, but hasn't
* finished, we need to wait for it to complete to update the tail.
*/
- rte_wait_until_equal_32(&s->shared.tail, old_head, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&s->shared.tail, old_head,
+ rte_memory_order_acquire);
- __atomic_store_n(&s->shared.tail, old_head + num_entries,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, old_head + num_entries,
+ rte_memory_order_release);
return num_entries;
}
@@ -525,8 +525,8 @@ struct opdl_ring {
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- event = __atomic_load_n(&(ev->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -627,8 +627,8 @@ struct opdl_ring {
num_entries, s->head - old_tail);
num_entries = s->head - old_tail;
}
- __atomic_store_n(&s->shared.tail, num_entries + old_tail,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, num_entries + old_tail,
+ rte_memory_order_release);
}
uint32_t
@@ -657,7 +657,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
@@ -676,7 +676,7 @@ struct opdl_ring {
copy_entries_out(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -755,7 +755,7 @@ struct opdl_ring {
return 0;
}
if (s->threadsafe == false) {
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
s->seq += s->num_claimed;
s->shadow_head = s->head;
s->num_claimed = 0;
@@ -1008,8 +1008,8 @@ struct opdl_ring *
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- event = __atomic_load_n(&(ev_orig->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev_orig->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -1026,9 +1026,9 @@ struct opdl_ring *
if ((event & OPDL_EVENT_MASK) !=
ev_temp) {
- __atomic_store_n(&(ev_orig->event),
- ev_update,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(
+ (uint64_t __rte_atomic *)&ev_orig->event,
+ ev_update, rte_memory_order_release);
ev_updated = true;
}
if (ev_orig->u64 != ev->u64) {
--
1.8.3.1
* [PATCH v4 24/45] event/octeontx: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (22 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 23/45] event/opdl: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 25/45] event/dsw: " Tyler Retzlaff
` (20 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
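The conversion below is mostly fetch-or/fetch-and updates of the bucket control word. A minimal sketch of that read-modify-write style with the rte stdatomic API; the bit position and names are made up for illustration:

    #include <stdint.h>
    #include <rte_stdatomic.h>

    #define START_BIT (UINT64_C(1) << 32)   /* made-up bit position */

    static RTE_ATOMIC(uint64_t) bucket_w1;  /* illustrative control word */

    /* Set the bit and return the previous word, as the timer code does
     * when it needs the pre-update state of the bucket. */
    static uint64_t
    set_start(void)
    {
        return rte_atomic_fetch_or_explicit(&bucket_w1, START_BIT,
                rte_memory_order_acq_rel);
    }

    static uint64_t
    clear_start(void)
    {
        return rte_atomic_fetch_and_explicit(&bucket_w1, ~START_BIT,
                rte_memory_order_acq_rel);
    }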
---
drivers/event/octeontx/timvf_evdev.h | 8 ++++----
drivers/event/octeontx/timvf_worker.h | 36 +++++++++++++++++------------------
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index e7a63e4..3a2dc47 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -126,15 +126,15 @@ enum timvf_clk_src {
struct __rte_aligned(8) tim_mem_bucket {
uint64_t first_chunk;
union {
- uint64_t w1;
+ RTE_ATOMIC(uint64_t) w1;
struct {
- uint32_t nb_entry;
+ RTE_ATOMIC(uint32_t) nb_entry;
uint8_t sbt:1;
uint8_t hbt:1;
uint8_t bsk:1;
uint8_t rsvd:5;
- uint8_t lock;
- int16_t chunk_remainder;
+ RTE_ATOMIC(uint8_t) lock;
+ RTE_ATOMIC(int16_t) chunk_remainder;
};
};
uint64_t current_chunk;
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index e4b923e..de9f1b0 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -19,22 +19,22 @@
static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
- return __atomic_load_n(&bktp->chunk_remainder,
- __ATOMIC_ACQUIRE);
+ return rte_atomic_load_explicit(&bktp->chunk_remainder,
+ rte_memory_order_acquire);
}
static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_store_n(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_fetch_sub(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline uint8_t
@@ -47,14 +47,14 @@
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
- return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_or_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint8_t
@@ -81,34 +81,34 @@
{
/*Clear everything except lock. */
const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
- __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
- __ATOMIC_RELAXED);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
+ rte_memory_order_relaxed);
}
static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit(&bktp->lock, 0xff, rte_memory_order_acq_rel);
}
static inline uint32_t
@@ -121,13 +121,13 @@
static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
}
static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
- __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
}
static inline uint64_t
@@ -135,7 +135,7 @@
{
const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
TIM_BUCKET_W1_S_NUM_ENTRIES);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v;
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
}
static inline struct tim_mem_entry *
--
1.8.3.1
* [PATCH v4 25/45] event/dsw: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (23 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 24/45] event/octeontx: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 26/45] dma/skeleton: " Tyler Retzlaff
` (19 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
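The credit-pool hunks below keep dsw's optimistic take-then-roll-back scheme. A minimal sketch of that scheme with the rte stdatomic API, with illustrative names and no claim to match the driver's exact accounting:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    static RTE_ATOMIC(int32_t) credits_on_loan;   /* illustrative global pool */

    static bool
    take_credits(int32_t want, int32_t max_inflight)
    {
        /* Optimistically take the credits ... */
        int32_t new_total = rte_atomic_fetch_add_explicit(&credits_on_loan,
                want, rte_memory_order_relaxed) + want;

        if (new_total > max_inflight) {
            /* ... and hand them back if the pool was oversubscribed. */
            rte_atomic_fetch_sub_explicit(&credits_on_loan, want,
                    rte_memory_order_relaxed);
            return false;
        }
        return true;
    }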
---
drivers/event/dsw/dsw_evdev.h | 6 +++---
drivers/event/dsw/dsw_event.c | 47 +++++++++++++++++++++++++++---------------
drivers/event/dsw/dsw_xstats.c | 4 ++--
3 files changed, 35 insertions(+), 22 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 3a5989f..2018306 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -227,9 +227,9 @@ struct __rte_cache_aligned dsw_port {
alignas(RTE_CACHE_LINE_SIZE) struct rte_ring *ctl_in_ring;
/* Estimate of current port load. */
- alignas(RTE_CACHE_LINE_SIZE) int16_t load;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int16_t) load;
/* Estimate of flows currently migrating to this port. */
- alignas(RTE_CACHE_LINE_SIZE) int32_t immigration_load;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int32_t) immigration_load;
};
struct dsw_queue {
@@ -252,7 +252,7 @@ struct dsw_evdev {
uint8_t num_queues;
int32_t max_inflight;
- alignas(RTE_CACHE_LINE_SIZE) int32_t credits_on_loan;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int32_t) credits_on_loan;
};
#define DSW_CTL_PAUS_REQ (0)
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 23488d9..70c3c3a 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -33,7 +33,8 @@
}
total_on_loan =
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed);
available = dsw->max_inflight - total_on_loan;
acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
@@ -45,13 +46,16 @@
* allocation.
*/
new_total_on_loan =
- __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED) + acquired_credits;
+ rte_atomic_fetch_add_explicit(&dsw->credits_on_loan,
+ acquired_credits,
+ rte_memory_order_relaxed) +
+ acquired_credits;
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
- __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan,
+ acquired_credits,
+ rte_memory_order_relaxed);
return false;
}
@@ -77,8 +81,9 @@
port->inflight_credits = leave_credits;
- __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan,
+ return_credits,
+ rte_memory_order_relaxed);
DSW_LOG_DP_PORT(DEBUG, port->id,
"Returned %d tokens to pool.\n",
@@ -156,19 +161,22 @@
int16_t period_load;
int16_t new_load;
- old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
+ old_load = rte_atomic_load_explicit(&port->load,
+ rte_memory_order_relaxed);
period_load = dsw_port_load_close_period(port, now);
new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
(DSW_OLD_LOAD_WEIGHT+1);
- __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->load, new_load,
+ rte_memory_order_relaxed);
/* The load of the recently immigrated flows should hopefully
* be reflected the load estimate by now.
*/
- __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->immigration_load, 0,
+ rte_memory_order_relaxed);
}
static void
@@ -390,10 +398,11 @@ struct dsw_queue_flow_burst {
for (i = 0; i < dsw->num_ports; i++) {
int16_t measured_load =
- __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].load,
+ rte_memory_order_relaxed);
int32_t immigration_load =
- __atomic_load_n(&dsw->ports[i].immigration_load,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
+ rte_memory_order_relaxed);
int32_t load = measured_load + immigration_load;
load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -523,8 +532,10 @@ struct dsw_queue_flow_burst {
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
- __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
- candidate_flow_load, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ &dsw->ports[candidate_port_id].immigration_load,
+ candidate_flow_load,
+ rte_memory_order_relaxed);
return true;
}
@@ -882,7 +893,8 @@ struct dsw_queue_flow_burst {
}
source_port_load =
- __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&source_port->load,
+ rte_memory_order_relaxed);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Load %d is below threshold level %d.\n",
@@ -1301,7 +1313,8 @@ struct dsw_queue_flow_burst {
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
+ rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed) >
source_port->new_event_threshold))
return 0;
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index 2a83a28..f61dfd8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -48,7 +48,7 @@ struct dsw_xstats_port {
static uint64_t
dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
{
- return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
}
static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ struct dsw_xstats_port {
{
int16_t load;
- load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
+ load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
return DSW_LOAD_TO_PERCENT(load);
}
--
1.8.3.1
* [PATCH v4 26/45] dma/skeleton: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (24 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 25/45] event/dsw: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 27/45] crypto/octeontx: " Tyler Retzlaff
` (18 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
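The two hunks below form a release/acquire pair around completed_count. A simplified sketch of that pairing, with illustrative names and the idle check reduced to the counter comparison only:

    #include <stdint.h>
    #include <rte_stdatomic.h>

    static uint64_t submitted;               /* written by the submitter only */
    static RTE_ATOMIC(uint64_t) completed;   /* illustrative completion count */

    /* Worker side: the release increment orders the copied data before the
     * counter update that publishes it. */
    static void
    mark_completed(void)
    {
        rte_atomic_fetch_add_explicit(&completed, 1, rte_memory_order_release);
    }

    /* Query side: the acquire load pairs with the release above, so a
     * caller that observes the new count also observes the finished work. */
    static int
    vchan_idle(void)
    {
        return submitted == rte_atomic_load_explicit(&completed,
                rte_memory_order_acquire);
    }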
---
drivers/dma/skeleton/skeleton_dmadev.c | 5 +++--
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index 48f88f9..926c188 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -142,7 +142,7 @@
else if (desc->op == SKELDMA_OP_FILL)
do_fill(desc);
- __atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&hw->completed_count, 1, rte_memory_order_release);
(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
}
@@ -335,7 +335,8 @@
RTE_SET_USED(vchan);
*status = RTE_DMA_VCHAN_IDLE;
- if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE)
+ if (hw->submitted_count != rte_atomic_load_explicit(&hw->completed_count,
+ rte_memory_order_acquire)
|| hw->zero_req_count == 0)
*status = RTE_DMA_VCHAN_ACTIVE;
return 0;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index cfd37d1..0365f64 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -81,7 +81,7 @@ struct skeldma_hw {
/* Cache delimiter for cpuwork thread's operation data */
alignas(RTE_CACHE_LINE_SIZE) char cache2;
volatile uint32_t zero_req_count;
- uint64_t completed_count;
+ RTE_ATOMIC(uint64_t) completed_count;
};
#endif /* SKELETON_DMADEV_H */
--
1.8.3.1
* [PATCH v4 27/45] crypto/octeontx: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (25 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 26/45] dma/skeleton: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 28/45] common/mlx5: " Tyler Retzlaff
` (17 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
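Only the fence spellings change below. As a reminder of what the release/acquire fences buy, a minimal sketch with an illustrative flag standing in for the hardware-visible queue state:

    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_pause.h>
    #include <rte_stdatomic.h>

    static uint64_t payload;            /* illustrative request data */
    static RTE_ATOMIC(uint32_t) ready;  /* stands in for the queue state */

    /* Producer: make the payload visible before announcing it. */
    static void
    publish(uint64_t data)
    {
        payload = data;
        rte_atomic_thread_fence(rte_memory_order_release);
        rte_atomic_store_explicit(&ready, 1, rte_memory_order_relaxed);
    }

    /* Consumer: see the announcement first, then fence before reading the
     * payload, mirroring the acquire fence before reading completed entries. */
    static uint64_t
    consume(void)
    {
        while (rte_atomic_load_explicit(&ready, rte_memory_order_relaxed) == 0)
            rte_pause();
        rte_atomic_thread_fence(rte_memory_order_acquire);
        return payload;
    }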
---
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 947e1be..bafd0c1 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -652,7 +652,7 @@
if (!rsp_info->sched_type)
ssows_head_wait(ws);
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
}
@@ -896,7 +896,7 @@
pcount = pending_queue_level(pqueue, DEFAULT_CMD_QLEN);
/* Ensure pcount isn't read before data lands */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
count = (nb_ops > pcount) ? pcount : nb_ops;
--
1.8.3.1
* [PATCH v4 28/45] common/mlx5: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (26 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 27/45] crypto/octeontx: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 29/45] common/idpf: " Tyler Retzlaff
` (16 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
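Among the hunks below, the reworked MLX5_NL_SN_GENERATE macro is the simplest illustration of the new API: fetch-add returns the previous value, so adding one yields a fresh sequence number. A minimal sketch of such a generator, using illustrative names rather than the driver's:

    #include <stdint.h>
    #include <rte_stdatomic.h>

    static RTE_ATOMIC(uint32_t) sn;    /* illustrative sequence counter */

    static uint32_t
    next_sn(void)
    {
        /* fetch-add returns the previous value, so every caller gets a
         * distinct number (until the 32-bit counter wraps). */
        return rte_atomic_fetch_add_explicit(&sn, 1,
                rte_memory_order_relaxed) + 1;
    }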
---
drivers/common/mlx5/linux/mlx5_nl.c | 5 +--
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 ++++-----
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 +++++++++---------
drivers/common/mlx5/mlx5_common_utils.h | 6 ++--
drivers/common/mlx5/mlx5_malloc.c | 58 ++++++++++++++++-----------------
7 files changed, 61 insertions(+), 60 deletions(-)
diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 61192eb..a5ac4dc 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -175,10 +175,11 @@ struct mlx5_nl_port_info {
uint16_t state; /**< IB device port state (out). */
};
-uint32_t atomic_sn;
+RTE_ATOMIC(uint32_t) atomic_sn;
/* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
+#define MLX5_NL_SN_GENERATE (rte_atomic_fetch_add_explicit(&atomic_sn, 1, \
+ rte_memory_order_relaxed) + 1)
/**
* Opens a Netlink socket.
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 9c80277..14c70ed 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -195,7 +195,7 @@ enum mlx5_cqe_status {
/* Prevent speculative reading of other fields in CQE until
* CQE is valid.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
op_code == MLX5_CQE_REQ_ERR))
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 85ec10d..50922ad 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -35,7 +35,7 @@ struct mlx5_range {
/** Memory region for a mempool. */
struct mlx5_mempool_mr {
struct mlx5_pmd_mr pmd_mr;
- uint32_t refcnt; /**< Number of mempools sharing this MR. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
};
/* Mempool registration. */
@@ -56,11 +56,11 @@ struct mlx5_mempool_reg {
{
struct mlx5_mprq_buf *buf = opaque;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
rte_mempool_put(buf->mp, buf);
- } else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0)) {
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ } else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0)) {
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
rte_mempool_put(buf->mp, buf);
}
}
@@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {
unsigned int i;
for (i = 0; i < mpr->mrs_n; i++)
- __atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {
bool ret = false;
for (i = 0; i < mpr->mrs_n; i++)
- ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0;
+ ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0;
return ret;
}
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index aa10b68..a7f1042 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {
/* Multi-Packet RQ buffer header. */
struct __rte_cache_aligned mlx5_mprq_buf {
struct rte_mempool *mp;
- uint16_t refcnt; /* Atomically accessed refcnt. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
struct rte_mbuf_ext_shared_info shinfos[];
/*
* Shared information per stride.
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index e69d068..4b95d35 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -81,14 +81,14 @@ struct mlx5_list *
while (entry != NULL) {
if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
if (reuse) {
- ret = __atomic_fetch_add(&entry->ref_cnt, 1,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
+ rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
l_const->name, (void *)entry,
entry->ref_cnt);
} else if (lcore_index < MLX5_LIST_GLOBAL) {
- ret = __atomic_load_n(&entry->ref_cnt,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_load_explicit(&entry->ref_cnt,
+ rte_memory_order_relaxed);
}
if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
return entry;
@@ -151,13 +151,13 @@ struct mlx5_list_entry *
{
struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
- uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
- __ATOMIC_RELAXED);
+ uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
+ rte_memory_order_relaxed);
while (inv_cnt != 0 && entry != NULL) {
struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
- if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
LIST_REMOVE(entry, next);
if (l_const->lcores_share)
l_const->cb_clone_free(l_const->ctx, entry);
@@ -217,7 +217,7 @@ struct mlx5_list_entry *
entry->lcore_idx = (uint32_t)lcore_index;
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
l_const->name, lcore_index,
(void *)entry, entry->ref_cnt);
@@ -254,7 +254,7 @@ struct mlx5_list_entry *
l_inconst->gen_cnt++;
rte_rwlock_write_unlock(&l_inconst->lock);
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
(void *)entry, entry->ref_cnt);
return local_entry;
@@ -285,7 +285,7 @@ struct mlx5_list_entry *
{
struct mlx5_list_entry *gentry = entry->gentry;
- if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
if (entry->lcore_idx == (uint32_t)lcore_idx) {
LIST_REMOVE(entry, next);
@@ -294,23 +294,23 @@ struct mlx5_list_entry *
else
l_const->cb_remove(l_const->ctx, entry);
} else {
- __atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+ 1, rte_memory_order_relaxed);
}
if (!l_const->lcores_share) {
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)entry);
return 0;
}
- if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
rte_rwlock_write_lock(&l_inconst->lock);
if (likely(gentry->ref_cnt == 0)) {
LIST_REMOVE(gentry, next);
rte_rwlock_write_unlock(&l_inconst->lock);
l_const->cb_remove(l_const->ctx, gentry);
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)gentry);
return 0;
@@ -377,7 +377,7 @@ struct mlx5_list_entry *
mlx5_list_get_entry_num(struct mlx5_list *list)
{
MLX5_ASSERT(list);
- return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
}
/********************* Hash List **********************/
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index 44eba50..c5eff7a 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -29,7 +29,7 @@
*/
struct mlx5_list_entry {
LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
- alignas(8) uint32_t ref_cnt; /* 0 means, entry is invalid. */
+ alignas(8) RTE_ATOMIC(uint32_t) ref_cnt; /* 0 means, entry is invalid. */
uint32_t lcore_idx;
union {
struct mlx5_list_entry *gentry;
@@ -39,7 +39,7 @@ struct mlx5_list_entry {
struct __rte_cache_aligned mlx5_list_cache {
LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
- uint32_t inv_cnt; /* Invalid entries counter. */
+ RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
};
/**
@@ -111,7 +111,7 @@ struct mlx5_list_const {
struct mlx5_list_inconst {
rte_rwlock_t lock; /* read/write lock. */
volatile uint32_t gen_cnt; /* List modification may update it. */
- volatile uint32_t count; /* number of entries in list. */
+ volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
struct mlx5_list_cache *cache[MLX5_LIST_MAX];
/* Lcore cache, last index is the global cache. */
};
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index c58c41d..ef6dabe 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -16,7 +16,7 @@ struct mlx5_sys_mem {
uint32_t init:1; /* Memory allocator initialized. */
uint32_t enable:1; /* System memory select. */
uint32_t reserve:30; /* Reserve. */
- struct rte_memseg_list *last_msl;
+ RTE_ATOMIC(struct rte_memseg_list *) last_msl;
/* last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
uint64_t malloc_sys;
@@ -93,14 +93,14 @@ struct mlx5_sys_mem {
* different with the cached msl.
*/
if (addr && !mlx5_mem_check_msl(addr,
- (struct rte_memseg_list *)__atomic_load_n
- (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
- __atomic_store_n(&mlx5_sys_mem.last_msl,
+ (struct rte_memseg_list *)rte_atomic_load_explicit
+ (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
+ rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
rte_mem_virt2memseg_list(addr),
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
+ rte_memory_order_relaxed);
#endif
}
}
@@ -122,11 +122,11 @@ struct mlx5_sys_mem {
* to check if the memory belongs to rte memory.
*/
if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
- __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+ rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
if (!rte_mem_virt2memseg_list(addr))
return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
#endif
}
return true;
@@ -185,8 +185,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -199,8 +199,8 @@ struct mlx5_sys_mem {
addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -233,8 +233,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -246,8 +246,8 @@ struct mlx5_sys_mem {
new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -259,14 +259,14 @@ struct mlx5_sys_mem {
return;
if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
+ rte_memory_order_relaxed);
#endif
mlx5_os_free(addr);
} else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
+ rte_memory_order_relaxed);
#endif
rte_free(addr);
}
@@ -280,14 +280,14 @@ struct mlx5_sys_mem {
" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
" update:%"PRIi64"",
- __atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
#endif
}
--
1.8.3.1
* [PATCH v4 29/45] common/idpf: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (27 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 28/45] common/mlx5: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 30/45] common/iavf: " Tyler Retzlaff
` (15 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
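The atomic_set_cmd() hunk below is worth a closer look: __atomic_compare_exchange() passed the desired value by pointer, while rte_atomic_compare_exchange_strong_explicit() takes it by value. A minimal sketch of that single-slot claim, with an illustrative OP_UNKNOWN value rather than the virtchnl2 constant:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    #define OP_UNKNOWN 0    /* illustrative "no command pending" value */

    static RTE_ATOMIC(uint32_t) pend_cmd = OP_UNKNOWN;

    /* Claim the single command slot; fails if another command is pending.
     * Note that the desired value 'ops' is passed by value here, whereas
     * the old __atomic_compare_exchange() builtin took a pointer to it. */
    static bool
    set_pending(uint32_t ops)
    {
        uint32_t expected = OP_UNKNOWN;

        return rte_atomic_compare_exchange_strong_explicit(&pend_cmd, &expected,
                ops, rte_memory_order_acquire, rte_memory_order_acquire);
    }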
---
drivers/common/idpf/idpf_common_device.h | 6 +++---
drivers/common/idpf/idpf_common_rxtx.c | 14 ++++++++------
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 ++++++++--------
4 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 3834c1f..bfa927a 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -48,7 +48,7 @@ struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
struct virtchnl2_get_capabilities caps;
- volatile uint32_t pend_cmd; /* pending command not finished */
+ volatile RTE_ATOMIC(uint32_t) pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from cp */
uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
@@ -179,8 +179,8 @@ struct idpf_cmd_info {
atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
{
uint32_t op_unk = VIRTCHNL2_OP_UNKNOWN;
- bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ bool ret = rte_atomic_compare_exchange_strong_explicit(&adapter->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 83b131e..b09c58c 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -592,8 +592,8 @@
next_avail = 0;
rx_bufq->nb_rx_hold -= delta;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
return;
@@ -612,8 +612,8 @@
next_avail += nb_refill;
rx_bufq->nb_rx_hold -= nb_refill;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
}
@@ -1093,7 +1093,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(nmb == NULL)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -1203,7 +1204,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index b49b1ed..eeeeed1 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -97,7 +97,7 @@
#define IDPF_RX_SPLIT_BUFQ2_ID 2
struct idpf_rx_stats {
- uint64_t mbuf_alloc_failed;
+ RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
struct idpf_rx_queue {
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index f65e8d5..3b5e124 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -38,8 +38,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -168,8 +168,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
@@ -564,8 +564,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
@@ -638,8 +638,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 30/45] common/iavf: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (28 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 29/45] common/idpf: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 31/45] baseband/acc: " Tyler Retzlaff
` (14 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
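For illustration only (not part of the patch, names are hypothetical), the
counter pattern converts like this: the object gains the RTE_ATOMIC()
specifier and the relaxed fetch-add keeps its semantics under the new
spelling.

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  #include <rte_stdatomic.h>

  /* Hypothetical shared counter used to build unique names. */
  static RTE_ATOMIC(uint64_t) example_id;

  static void
  example_build_name(char *buf, size_t len)
  {
      /* Relaxed ordering is enough here: only atomicity of the
       * increment matters, not ordering against other accesses.
       */
      uint64_t id = rte_atomic_fetch_add_explicit(&example_id, 1,
              rte_memory_order_relaxed);

      snprintf(buf, len, "example_dma_%" PRIu64, id);
  }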
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/iavf/iavf_impl.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
index 8919b0e..c0ff301 100644
--- a/drivers/common/iavf/iavf_impl.c
+++ b/drivers/common/iavf/iavf_impl.c
@@ -18,7 +18,7 @@ enum iavf_status
u64 size,
u32 alignment)
{
- static uint64_t iavf_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) iavf_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -26,7 +26,7 @@ enum iavf_status
return IAVF_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "iavf_dma_%" PRIu64,
- __atomic_fetch_add(&iavf_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&iavf_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 31/45] baseband/acc: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (29 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 30/45] common/iavf: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 32/45] net/txgbe: " Tyler Retzlaff
` (13 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
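A minimal sketch of the descriptor-load pattern used throughout this patch
(illustrative, hypothetical names): the descriptor ring itself stays
non-atomic, so the load site casts to an atomic-qualified pointer with
__rte_atomic.

  #include <stdint.h>

  #include <rte_stdatomic.h>

  /* Hypothetical descriptor word updated by the device. */
  static uint64_t example_desc;

  static uint64_t
  example_peek_desc(void)
  {
      /* The object is not declared with RTE_ATOMIC(), so the call
       * site adds the qualifier via a cast; relaxed order is enough
       * because only the loaded value is inspected.
       */
      return rte_atomic_load_explicit(
              (uint64_t __rte_atomic *)&example_desc,
              rte_memory_order_relaxed);
  }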
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 34 deletions(-)
diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
index 4f666e5..ee50b9c 100644
--- a/drivers/baseband/acc/rte_acc100_pmd.c
+++ b/drivers/baseband/acc/rte_acc100_pmd.c
@@ -3673,8 +3673,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3728,8 +3728,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3742,8 +3742,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3755,8 +3755,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n",
desc, rsp.val, descs_in_tb, desc->req.numCBs);
@@ -3793,8 +3793,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3846,8 +3846,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3902,8 +3902,8 @@
uint8_t cbs_in_tb = 1, cb_idx = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3919,8 +3919,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3930,8 +3930,8 @@
/* Read remaining CBs if exists */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
desc, rsp.val, cb_idx, cbs_in_tb);
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 88b1104..f7c54be 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -3119,7 +3119,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
return -1;
@@ -3157,7 +3158,8 @@
struct rte_bbdev_enc_op *op;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3192,7 +3194,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + 1 > max_requested_ops)
return -1;
@@ -3208,7 +3211,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3220,7 +3224,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
@@ -3246,7 +3251,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3290,7 +3296,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3346,7 +3353,8 @@
uint32_t tb_crc_check = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3362,7 +3370,8 @@
/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.
* If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3372,7 +3381,8 @@
/* Read remaining CBs if exists. */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
rsp.val, desc->rsp.add_info_0,
@@ -3790,7 +3800,8 @@
struct rte_bbdev_fft_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4116,7 +4127,8 @@
uint8_t descs_in_op, i;
desc = acc_desc_tail(q, dequeued_ops);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4127,7 +4139,8 @@
/* Get last CB. */
last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1);
/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -4137,8 +4150,8 @@
for (i = 1; i < descs_in_op - 1; i++) {
last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
& q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit(
+ (uint64_t __rte_atomic *)last_desc, rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
}
@@ -4154,7 +4167,8 @@
for (i = 0; i < descs_in_op; i++) {
desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 32/45] net/txgbe: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (30 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 31/45] baseband/acc: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 33/45] net/null: " Tyler Retzlaff
` (12 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
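Sketch of the flag handling (illustrative, hypothetical flag name): the
__atomic_test_and_set()/__atomic_clear() builtins are expressed with an
exchange and a plain store in the stdatomic spelling.

  #include <stdbool.h>
  #include <stdint.h>

  #include <rte_stdatomic.h>

  static RTE_ATOMIC(uint32_t) example_thread_running;

  /* Returns true if this caller won the right to start the thread. */
  static bool
  example_try_start(void)
  {
      /* exchange returns the previous value: 0 means no thread was
       * running and this caller just set the flag.
       */
      return rte_atomic_exchange_explicit(&example_thread_running, 1,
              rte_memory_order_seq_cst) == 0;
  }

  static void
  example_stop(void)
  {
      rte_atomic_store_explicit(&example_thread_running, 0,
              rte_memory_order_seq_cst);
  }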
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/txgbe/txgbe_ethdev.c | 12 +++++++-----
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
3 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b75e889..a58f197 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -595,7 +595,7 @@ static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -2834,7 +2834,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev);
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -2859,7 +2859,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
rte_thread_detach(rte_thread_self());
txgbe_dev_setup_link_alarm_handler(dev);
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -2908,7 +2908,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
} else if (hw->phy.media_type == txgbe_media_type_fiber &&
dev->data->dev_conf.intr_conf.lsc != 0) {
txgbe_dev_wait_setup_link_complete(dev, 0);
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the TXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -2918,7 +2919,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
"txgbe-link",
txgbe_dev_setup_link_thread_handler, dev) < 0) {
PMD_DRV_LOG(ERR, "Create link thread failed!");
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 7e8067c..e8f55f7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -372,7 +372,7 @@ struct txgbe_adapter {
/* For RSS reta table update */
uint8_t rss_reta_updated;
- uint32_t link_thread_running;
+ RTE_ATOMIC(uint32_t) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index f1341fb..1abc190 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -206,7 +206,7 @@ static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->device_id = pci_dev->id.device_id;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 33/45] net/null: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (31 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 32/45] net/txgbe: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 34/45] event/dlb2: " Tyler Retzlaff
` (11 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
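Sketch of the declaration-side change (illustrative, hypothetical struct):
fields touched with rte_atomic_*_explicit() are declared with RTE_ATOMIC()
so the code builds whether or not the standard C11 atomics backend is
enabled; the existing seq_cst ordering is kept unchanged.

  #include <stdint.h>

  #include <rte_stdatomic.h>

  struct example_queue {
      /* Written from the datapath, read from the stats path. */
      RTE_ATOMIC(uint64_t) rx_pkts;
      RTE_ATOMIC(uint64_t) tx_pkts;
  };

  static inline void
  example_count_rx(struct example_queue *q, uint64_t n)
  {
      /* seq_cst mirrors the original __ATOMIC_SEQ_CST; the driver
       * keeps its "review for potential ordering optimization" note.
       */
      rte_atomic_fetch_add_explicit(&q->rx_pkts, n,
              rte_memory_order_seq_cst);
  }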
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/null/rte_eth_null.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 7c46004..f4ed3b8 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -37,8 +37,8 @@ struct null_queue {
struct rte_mempool *mb_pool;
struct rte_mbuf *dummy_packet;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_options {
@@ -102,7 +102,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -130,7 +130,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -155,7 +155,7 @@ struct pmd_internals {
rte_pktmbuf_free(bufs[i]);
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -178,7 +178,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 34/45] event/dlb2: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (32 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 33/45] net/null: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 35/45] dma/idxd: " Tyler Retzlaff
` (10 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
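Sketch of the compare-and-swap conversion (illustrative, hypothetical pool
name): the builtin's separate weak/strong boolean argument goes away, the
strong form is selected by name, and the expected value is still passed by
pointer so it is refreshed on failure.

  #include <stdint.h>

  #include <rte_stdatomic.h>

  static RTE_ATOMIC(uint32_t) example_credit_pool;

  /* Try to take 'want' credits; returns the number actually taken. */
  static uint32_t
  example_take_credits(uint32_t want)
  {
      uint32_t avail = rte_atomic_load_explicit(&example_credit_pool,
              rte_memory_order_seq_cst);

      if (avail < want)
          return 0;

      /* On failure 'avail' is updated with the current pool value;
       * a caller could retry in a loop, this sketch simply gives up.
       */
      if (rte_atomic_compare_exchange_strong_explicit(
              &example_credit_pool, &avail, avail - want,
              rte_memory_order_seq_cst, rte_memory_order_seq_cst))
          return want;

      return 0;
  }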
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/dlb2/dlb2.c | 34 +++++++++++++++++-----------------
drivers/event/dlb2/dlb2_priv.h | 13 +++++--------
drivers/event/dlb2/dlb2_xstats.c | 2 +-
3 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 628ddef..0b91f03 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1005,7 +1005,7 @@ struct process_local_port_data
}
dlb2->new_event_limit = config->nb_events_limit;
- __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&dlb2->inflights, 0, rte_memory_order_seq_cst);
/* Save number of ports/queues for this event dev */
dlb2->num_ports = config->nb_event_ports;
@@ -2668,10 +2668,10 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
batch_size = credits;
if (likely(credits &&
- __atomic_compare_exchange_n(
+ rte_atomic_compare_exchange_strong_explicit(
qm_port->credit_pool[type],
- &credits, credits - batch_size, false,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+ &credits, credits - batch_size,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst)))
return batch_size;
else
return 0;
@@ -2687,7 +2687,7 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
/* Replenish credits, saving one quanta for enqueues */
uint16_t val = ev_port->inflight_credits - quanta;
- __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
ev_port->inflight_credits -= val;
}
}
@@ -2696,8 +2696,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_port *ev_port)
{
- uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
- __ATOMIC_SEQ_CST);
+ uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
+ rte_memory_order_seq_cst);
const int num = 1;
if (unlikely(ev_port->inflight_max < sw_inflights)) {
@@ -2719,8 +2719,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
return 1;
}
- __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
+ rte_memory_order_seq_cst);
ev_port->inflight_credits += (credit_update_quanta);
if (ev_port->inflight_credits < num) {
@@ -3234,17 +3234,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_ldb_credits += num;
if (qm_port->cached_ldb_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_LDB_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_ldb_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
@@ -3252,17 +3252,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_dir_credits += num;
if (qm_port->cached_dir_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_DIR_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_dir_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index 49f1c66..2470ae0 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -348,7 +348,7 @@ struct dlb2_port {
uint32_t dequeue_depth;
enum dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
- uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) *credit_pool[DLB2_NUM_QUEUE_TYPES];
union {
struct {
uint16_t cached_ldb_credits;
@@ -586,7 +586,7 @@ struct dlb2_eventdev {
uint32_t xstats_count_mode_dev;
uint32_t xstats_count_mode_port;
uint32_t xstats_count;
- uint32_t inflights; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) inflights;
uint32_t new_event_limit;
int max_num_events_override;
int num_dir_credits_override;
@@ -623,15 +623,12 @@ struct dlb2_eventdev {
struct {
uint16_t max_ldb_credits;
uint16_t max_dir_credits;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t ldb_credit_pool;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t dir_credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) ldb_credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) dir_credit_pool;
};
struct {
uint16_t max_credits;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) credit_pool;
};
};
uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index ff15271..22094f3 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -173,7 +173,7 @@ struct dlb2_xstats_entry {
case nb_events_limit:
return dlb2->new_event_limit;
case inflight_events:
- return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
+ return rte_atomic_load_explicit(&dlb2->inflights, rte_memory_order_seq_cst);
case ldb_pool_size:
return dlb2->num_ldb_credits;
case dir_pool_size:
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 35/45] dma/idxd: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (33 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 34/45] event/dlb2: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 36/45] crypto/ccp: " Tyler Retzlaff
` (9 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
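Sketch of the reference-count pattern (illustrative, hypothetical type):
rte_atomic_fetch_sub_explicit(), like the builtin it replaces, returns the
value before the subtraction, so '== 1' still detects the last reference.

  #include <stdbool.h>
  #include <stdint.h>

  #include <rte_stdatomic.h>

  struct example_shared {
      RTE_ATOMIC(uint16_t) ref_count;
  };

  static bool
  example_put(struct example_shared *s)
  {
      /* Previous value 1 means this caller dropped the last
       * reference and should tear down the shared state.
       */
      return rte_atomic_fetch_sub_explicit(&s->ref_count, 1,
              rte_memory_order_seq_cst) == 1;
  }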
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index cd41777..537cf9b 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -33,7 +33,7 @@ struct idxd_pci_common {
rte_spinlock_t lk;
uint8_t wq_cfg_sz;
- uint16_t ref_count;
+ RTE_ATOMIC(uint16_t) ref_count;
volatile struct rte_idxd_bar0 *regs;
volatile uint32_t *wq_regs_base;
volatile struct rte_idxd_grpcfg *grp_regs;
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index a78889a..06fa115 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -136,7 +136,8 @@
* the PCI struct
*/
/* NOTE: review for potential ordering optimization */
- is_last_wq = (__atomic_fetch_sub(&idxd->u.pci->ref_count, 1, __ATOMIC_SEQ_CST) == 1);
+ is_last_wq = (rte_atomic_fetch_sub_explicit(&idxd->u.pci->ref_count, 1,
+ rte_memory_order_seq_cst) == 1);
if (is_last_wq) {
/* disable the device */
err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -330,9 +331,9 @@
return ret;
}
qid = rte_dma_get_dev_id_by_name(qname);
- max_qid = __atomic_load_n(
+ max_qid = rte_atomic_load_explicit(
&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
- __ATOMIC_SEQ_CST);
+ rte_memory_order_seq_cst);
/* we have queue 0 done, now configure the rest of the queues */
for (qid = 1; qid < max_qid; qid++) {
@@ -389,7 +390,7 @@
free(idxd.u.pci);
return ret;
}
- __atomic_fetch_add(&idxd.u.pci->ref_count, 1, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&idxd.u.pci->ref_count, 1, rte_memory_order_seq_cst);
}
return 0;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 36/45] crypto/ccp: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (34 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 35/45] dma/idxd: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 37/45] common/cpt: " Tyler Retzlaff
` (8 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
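Sketch of the bitmap pattern (illustrative; WORD_BITS here stands in for
the driver's WORD_OFFSET/BIT_OFFSET helpers): the bitmap words stay plain
unsigned long, so the call sites add the __rte_atomic qualifier with a
cast.

  #include <limits.h>

  #include <rte_stdatomic.h>

  #define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

  static inline void
  example_set_bit(unsigned long *bitmap, unsigned int n)
  {
      /* Atomic OR of the selected bit into its bitmap word. */
      rte_atomic_fetch_or_explicit(
              (unsigned long __rte_atomic *)&bitmap[n / WORD_BITS],
              1UL << (n % WORD_BITS), rte_memory_order_seq_cst);
  }

  static inline void
  example_clear_bit(unsigned long *bitmap, unsigned int n)
  {
      /* Atomic AND with the inverted mask clears the bit. */
      rte_atomic_fetch_and_explicit(
              (unsigned long __rte_atomic *)&bitmap[n / WORD_BITS],
              ~(1UL << (n % WORD_BITS)), rte_memory_order_seq_cst);
  }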
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/crypto/ccp/ccp_dev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index b7ca3af..41c1422 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -116,15 +116,15 @@ struct ccp_queue *
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ (1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_and_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ ~(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline uint32_t
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 37/45] common/cpt: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (35 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 36/45] crypto/ccp: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 38/45] bus/vmbus: " Tyler Retzlaff
` (7 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
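For the fence only the ordering constant changes; rte_atomic_thread_fence()
keeps its name and now takes an rte_memory_order_* value. A minimal sketch
of the publish pattern (illustrative, hypothetical queue type):

  #include <stdint.h>

  #include <rte_atomic.h>
  #include <rte_stdatomic.h>

  struct example_queue {
      uint32_t tail;
      uint32_t mask; /* queue size minus one, size is a power of two */
  };

  static inline void
  example_publish(struct example_queue *q, uint32_t cnt)
  {
      /* Make the filled entries visible before the tail update that
       * publishes them to the consumer.
       */
      rte_atomic_thread_fence(rte_memory_order_release);
      q->tail = (q->tail + cnt) & q->mask;
  }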
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/cpt/cpt_common.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index 6596cc0..dee430f 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -73,7 +73,7 @@ struct __rte_aligned(8) cpt_request_info {
const unsigned int qsize)
{
/* Ensure ordering between setting the entry and updating the tail */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
q->tail = (q->tail + cnt) & (qsize - 1);
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 38/45] bus/vmbus: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (36 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 37/45] common/cpt: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 39/45] examples: " Tyler Retzlaff
` (6 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
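Sketch of the pointer-parameter change (illustrative, hypothetical names):
when a helper operates atomically on memory it is handed, the parameter
itself carries the RTE_ATOMIC() specifier, so no cast is needed at the
call site.

  #include <stdint.h>

  #include <rte_stdatomic.h>

  static inline void
  example_sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
  {
      /* Atomic OR into a word shared with another agent. */
      rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
  }

  static RTE_ATOMIC(uint32_t) example_pending;

  static void
  example_trigger(uint32_t id)
  {
      example_sync_set_bit(&example_pending, UINT32_C(1) << (id & 31));
  }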
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
drivers/bus/vmbus/vmbus_channel.c | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h
index a17ce40..e3299aa 100644
--- a/drivers/bus/vmbus/rte_vmbus_reg.h
+++ b/drivers/bus/vmbus/rte_vmbus_reg.h
@@ -28,7 +28,7 @@ struct vmbus_message {
*/
struct vmbus_mon_trig {
- uint32_t pending;
+ RTE_ATOMIC(uint32_t) pending;
uint32_t armed;
} __rte_packed;
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index 4d74df3..925c2aa 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -19,16 +19,16 @@
#include "private.h"
static inline void
-vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+vmbus_sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
- /* Use GCC builtin which atomic does atomic OR operation */
- __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}
static inline void
vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
{
- uint32_t *monitor_addr, monitor_mask;
+ RTE_ATOMIC(uint32_t) *monitor_addr;
+ uint32_t monitor_mask;
unsigned int trigger_index;
trigger_index = monitor_id / HV_MON_TRIG_LEN;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 39/45] examples: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (37 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 38/45] bus/vmbus: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 40/45] app/dumpcap: " Tyler Retzlaff
` (5 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx functions from the optional rte stdatomic API.
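The most common pattern across the examples is a relaxed exit flag shared
between a signal handler and the worker loops; an illustrative standalone
sketch (hypothetical names):

  #include <signal.h>
  #include <stdint.h>

  #include <rte_stdatomic.h>

  static RTE_ATOMIC(uint16_t) example_exit_flag;

  static void
  example_signal_handler(int signum)
  {
      (void)signum;
      /* Relaxed is sufficient: the flag is only a stop request and
       * does not publish any other data.
       */
      rte_atomic_store_explicit(&example_exit_flag, 1,
              rte_memory_order_relaxed);
  }

  static void
  example_main_loop(void)
  {
      signal(SIGTERM, example_signal_handler);

      while (!rte_atomic_load_explicit(&example_exit_flag,
              rte_memory_order_relaxed)) {
          /* do one unit of work */
      }
  }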
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
examples/bbdev_app/main.c | 13 +++++----
examples/l2fwd-event/l2fwd_common.h | 4 +--
examples/l2fwd-event/l2fwd_event.c | 24 ++++++++--------
examples/l2fwd-jobstats/main.c | 11 ++++----
.../client_server_mp/mp_server/main.c | 6 ++--
examples/server_node_efd/efd_server/main.c | 6 ++--
examples/vhost/main.c | 32 +++++++++++-----------
examples/vhost/main.h | 4 +--
examples/vhost/virtio_net.c | 13 +++++----
examples/vhost_blk/vhost_blk.c | 8 +++---
examples/vm_power_manager/channel_monitor.c | 9 +++---
11 files changed, 68 insertions(+), 62 deletions(-)
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index d4c686c..7124b49 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -165,7 +165,7 @@ struct stats_lcore_params {
.num_dec_cores = 1,
};
-static uint16_t global_exit_flag;
+static RTE_ATOMIC(uint16_t) global_exit_flag;
/* display usage */
static inline void
@@ -277,7 +277,7 @@ uint16_t bbdev_parse_number(const char *mask)
signal_handler(int signum)
{
printf("\nSignal %d received\n", signum);
- __atomic_store_n(&global_exit_flag, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 1, rte_memory_order_relaxed);
}
static void
@@ -321,7 +321,8 @@ uint16_t bbdev_parse_number(const char *mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME &&
- !__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED); count++) {
+ !rte_atomic_load_explicit(&global_exit_flag,
+ rte_memory_order_relaxed); count++) {
memset(&link, 0, sizeof(link));
link_get_err = rte_eth_link_get_nowait(port_id, &link);
@@ -675,7 +676,7 @@ uint16_t bbdev_parse_number(const char *mask)
{
struct stats_lcore_params *stats_lcore = arg;
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
print_stats(stats_lcore);
rte_delay_ms(500);
}
@@ -921,7 +922,7 @@ uint16_t bbdev_parse_number(const char *mask)
const bool run_decoder = (lcore_conf->core_type &
(1 << RTE_BBDEV_OP_TURBO_DEC));
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
if (run_encoder)
run_encoding(lcore_conf);
if (run_decoder)
@@ -1055,7 +1056,7 @@ uint16_t bbdev_parse_number(const char *mask)
.align = alignof(struct rte_mbuf *),
};
- __atomic_store_n(&global_exit_flag, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 0, rte_memory_order_relaxed);
sigret = signal(SIGTERM, signal_handler);
if (sigret == SIG_ERR)
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index c56b3e7..8cf91b9 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -61,8 +61,8 @@
/* Per-port statistics struct */
struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t dropped;
- uint64_t tx;
- uint64_t rx;
+ RTE_ATOMIC(uint64_t) tx;
+ RTE_ATOMIC(uint64_t) rx;
};
/* Event vector attributes */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 4b5a032..2247202 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -163,8 +163,8 @@
dst_port = rsrc->dst_ports[mbuf->port];
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].rx,
+ 1, rte_memory_order_relaxed);
mbuf->port = dst_port;
if (flags & L2FWD_EVENT_UPDT_MAC)
@@ -179,8 +179,8 @@
rte_event_eth_tx_adapter_txq_set(mbuf, 0);
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].tx,
+ 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -367,8 +367,8 @@
vec->queue = 0;
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbufs[0]->port].rx,
+ vec->nb_elem, rte_memory_order_relaxed);
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (j < vec->nb_elem)
@@ -382,14 +382,14 @@
}
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[vec->port].tx,
+ vec->nb_elem, rte_memory_order_relaxed);
} else {
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].rx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (j < vec->nb_elem)
rte_prefetch0(
@@ -406,9 +406,9 @@
rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].tx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
}
}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index cb7582a..308b8ed 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -80,7 +80,7 @@ struct __rte_cache_aligned lcore_queue_conf {
struct rte_jobstats idle_job;
struct rte_jobstats_context jobs_context;
- uint16_t stats_read_pending;
+ RTE_ATOMIC(uint16_t) stats_read_pending;
rte_spinlock_t lock;
};
/* >8 End of list of queues to be polled for given lcore. */
@@ -151,9 +151,9 @@ struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t collection_time = rte_get_timer_cycles();
/* Ask forwarding thread to give us stats. */
- __atomic_store_n(&qconf->stats_read_pending, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&qconf->lock);
- __atomic_store_n(&qconf->stats_read_pending, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);
/* Collect context statistics. */
stats_period = ctx->state_time - ctx->start_time;
@@ -522,8 +522,9 @@ struct __rte_cache_aligned l2fwd_port_statistics {
repeats++;
need_manage = qconf->flush_timer.expire < now;
/* Check if we was esked to give a stats. */
- stats_read_pending = __atomic_load_n(&qconf->stats_read_pending,
- __ATOMIC_RELAXED);
+ stats_read_pending = rte_atomic_load_explicit(
+ &qconf->stats_read_pending,
+ rte_memory_order_relaxed);
need_manage |= stats_read_pending;
for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index f54bb8b..ebfc2fe 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -157,12 +157,12 @@ struct client_rx_buf {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
uint32_t status = 0;
/* Only one core should display stats */
- if (__atomic_compare_exchange_n(&display_stats, &status, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &status, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/server_node_efd/efd_server/main.c b/examples/server_node_efd/efd_server/main.c
index fd72882..75ff0ea 100644
--- a/examples/server_node_efd/efd_server/main.c
+++ b/examples/server_node_efd/efd_server/main.c
@@ -177,12 +177,12 @@ struct efd_stats {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
/* Only one core should display stats */
uint32_t display_init = 0;
- if (__atomic_compare_exchange_n(&display_stats, &display_init, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &display_init, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned int sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3fc1b15..4391d88 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1052,10 +1052,10 @@ static unsigned check_ports_num(unsigned nb_ports)
}
if (enable_stats) {
- __atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_total_atomic, 1,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
src_vdev->stats.tx_total++;
src_vdev->stats.tx += ret;
}
@@ -1072,10 +1072,10 @@ static unsigned check_ports_num(unsigned nb_ports)
ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, nr_xmit,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1404,10 +1404,10 @@ static void virtio_tx_offload(struct rte_mbuf *m)
}
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, rx_count,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, enqueue_count,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1832,10 +1832,10 @@ uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
- __ATOMIC_SEQ_CST);
- rx = __atomic_load_n(&vdev->stats.rx_atomic,
- __ATOMIC_SEQ_CST);
+ rx_total = rte_atomic_load_explicit(&vdev->stats.rx_total_atomic,
+ rte_memory_order_seq_cst);
+ rx = rte_atomic_load_explicit(&vdev->stats.rx_atomic,
+ rte_memory_order_seq_cst);
rx_dropped = rx_total - rx;
printf("Statistics for device %d\n"
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index c1c9a42..c986cbc 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -22,8 +22,8 @@
struct device_statistics {
uint64_t tx;
uint64_t tx_total;
- uint64_t rx_atomic;
- uint64_t rx_total_atomic;
+ RTE_ATOMIC(uint64_t) rx_atomic;
+ RTE_ATOMIC(uint64_t) rx_total_atomic;
};
struct vhost_queue {
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 514c8e0..55af6e7 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -198,7 +198,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
+ avail_idx = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire);
start_idx = queue->last_used_idx;
free_entries = avail_idx - start_idx;
count = RTE_MIN(count, free_entries);
@@ -231,7 +232,8 @@
rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
}
- __atomic_fetch_add(&vr->used->idx, count, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, count,
+ rte_memory_order_release);
queue->last_used_idx += count;
rte_vhost_vring_call(dev->vid, queue_id);
@@ -386,8 +388,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
- queue->last_avail_idx;
+ free_entries = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire) - queue->last_avail_idx;
if (free_entries == 0)
return 0;
@@ -442,7 +444,8 @@
queue->last_avail_idx += i;
queue->last_used_idx += i;
- __atomic_fetch_add(&vr->used->idx, i, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, i,
+ rte_memory_order_acq_rel);
rte_vhost_vring_call(dev->vid, queue_id);
diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
index 376f7b8..03f1ac9 100644
--- a/examples/vhost_blk/vhost_blk.c
+++ b/examples/vhost_blk/vhost_blk.c
@@ -85,9 +85,9 @@ struct vhost_blk_ctrlr *
*/
used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
used->idx++;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
vq->id, used->idx, task->req_idx);
@@ -111,12 +111,12 @@ struct vhost_blk_ctrlr *
desc->id = task->buffer_id;
desc->addr = 0;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
if (vq->used_wrap_counter)
desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
else
desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
task->inflight_idx);
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5fef268..d384c86 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -828,8 +828,9 @@ void channel_monitor_exit(void)
return -1;
uint32_t channel_connected = CHANNEL_MGR_CHANNEL_CONNECTED;
- if (__atomic_compare_exchange_n(&(chan_info->status), &channel_connected,
- CHANNEL_MGR_CHANNEL_PROCESSING, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_connected,
+ CHANNEL_MGR_CHANNEL_PROCESSING, rte_memory_order_relaxed,
+ rte_memory_order_relaxed) == 0)
return -1;
if (pkt->command == RTE_POWER_CPU_POWER) {
@@ -934,8 +935,8 @@ void channel_monitor_exit(void)
* from management thread
*/
uint32_t channel_processing = CHANNEL_MGR_CHANNEL_PROCESSING;
- __atomic_compare_exchange_n(&(chan_info->status), &channel_processing,
- CHANNEL_MGR_CHANNEL_CONNECTED, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_processing,
+ CHANNEL_MGR_CHANNEL_CONNECTED, rte_memory_order_relaxed, rte_memory_order_relaxed);
return 0;
}
--
1.8.3.1
* [PATCH v4 40/45] app/dumpcap: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (38 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 39/45] examples: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 41/45] app/test: " Tyler Retzlaff
` (4 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
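For reference, the conversion applied below reduces to the sketch that
follows. It is illustrative only: the flag and function names are
hypothetical and not taken from dumpcap; the only assumption is a DPDK
build where <rte_stdatomic.h> provides RTE_ATOMIC() and the
rte_atomic_*_explicit calls used in the hunks.

#include <stdbool.h>
#include <rte_stdatomic.h>

/* was: static bool done_flag; accessed via __atomic_store_n()/__atomic_load_n() */
static RTE_ATOMIC(bool) done_flag;

/* stop path: publish the flag with relaxed ordering */
static void
request_stop(void)
{
	rte_atomic_store_explicit(&done_flag, true, rte_memory_order_relaxed);
}

/* polling path: loop until the flag is observed */
static void
run_until_stopped(void)
{
	while (!rte_atomic_load_explicit(&done_flag, rte_memory_order_relaxed)) {
		/* do work */
	}
}

Relaxed ordering suffices here because the flag only gates loop exit and
carries no payload, which matches the orderings kept in the hunks below.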
app/dumpcap/main.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/app/dumpcap/main.c b/app/dumpcap/main.c
index cc0f66b..b25b95e 100644
--- a/app/dumpcap/main.c
+++ b/app/dumpcap/main.c
@@ -51,7 +51,7 @@
/* command line flags */
static const char *progname;
-static bool quit_signal;
+static RTE_ATOMIC(bool) quit_signal;
static bool group_read;
static bool quiet;
static bool use_pcapng = true;
@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv)
static void
signal_handler(int sig_num __rte_unused)
{
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
@@ -490,7 +490,7 @@ static void statistics_loop(void)
printf("%-15s %10s %10s\n",
"Interface", "Received", "Dropped");
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
RTE_ETH_FOREACH_DEV(p) {
if (rte_eth_dev_get_name_by_port(p, name) < 0)
continue;
@@ -528,7 +528,7 @@ static void statistics_loop(void)
static void
monitor_primary(void *arg __rte_unused)
{
- if (__atomic_load_n(&quit_signal, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed))
return;
if (rte_eal_primary_proc_alive(NULL)) {
@@ -536,7 +536,7 @@ static void statistics_loop(void)
} else {
fprintf(stderr,
"Primary process is no longer active, exiting...\n");
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
}
@@ -983,7 +983,7 @@ int main(int argc, char **argv)
show_count(0);
}
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
if (process_ring(out, r) < 0) {
fprintf(stderr, "pcapng file write failed; %s\n",
strerror(errno));
--
1.8.3.1
* [PATCH v4 41/45] app/test: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (39 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 40/45] app/dumpcap: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 42/45] app/test-eventdev: " Tyler Retzlaff
` (3 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
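For reference, the recurring statistics-counter pattern converted across
these tests boils down to the sketch below. The names are illustrative,
not actual test symbols; it only assumes <rte_stdatomic.h>, as included by
the hunks that follow.

#include <stdint.h>
#include <rte_stdatomic.h>

/* was: static uint64_t handled; updated with __atomic_fetch_add() */
static RTE_ATOMIC(uint64_t) handled;

/* worker side: account processed packets with a relaxed read-modify-write */
static void
worker_account(uint64_t n)
{
	rte_atomic_fetch_add_explicit(&handled, n, rte_memory_order_relaxed);
}

/* control side: read the counter for reporting; relaxed is sufficient */
static uint64_t
control_read(void)
{
	return rte_atomic_load_explicit(&handled, rte_memory_order_relaxed);
}

Where the annotated type and an API's expected parameter type do not line
up directly, the hunks below instead cast at the call site, e.g.
(uint32_t __rte_atomic *)&dfe.u32 in test_bpf or
(uint32_t *)(uintptr_t)&synchro for rte_wait_until_equal_32().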
app/test/test_bpf.c | 46 ++++++++-----
app/test/test_distributor.c | 114 ++++++++++++++++-----------------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++++----
app/test/test_hash_multiwriter.c | 16 ++---
app/test/test_hash_readwrite.c | 74 ++++++++++-----------
app/test/test_hash_readwrite_lf_perf.c | 88 ++++++++++++-------------
app/test/test_lcores.c | 25 ++++----
app/test/test_lpm_perf.c | 14 ++--
app/test/test_mcslock.c | 12 ++--
app/test/test_mempool_perf.c | 9 +--
app/test/test_pflock.c | 13 ++--
app/test/test_pmd_perf.c | 10 +--
app/test/test_rcu_qsbr_perf.c | 114 +++++++++++++++++----------------
app/test/test_ring_perf.c | 11 ++--
app/test/test_ring_stress_impl.h | 10 +--
app/test/test_rwlock.c | 9 +--
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +++----
app/test/test_spinlock.c | 9 +--
app/test/test_stack_perf.c | 12 ++--
app/test/test_threads.c | 33 +++++-----
app/test/test_ticketlock.c | 9 +--
app/test/test_timer.c | 31 +++++----
24 files changed, 378 insertions(+), 343 deletions(-)
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 53e3a31..2e43442 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -39,8 +39,8 @@
*/
struct dummy_offset {
- uint64_t u64;
- uint32_t u32;
+ RTE_ATOMIC(uint64_t) u64;
+ RTE_ATOMIC(uint32_t) u32;
uint16_t u16;
uint8_t u8;
};
@@ -1581,32 +1581,46 @@ struct bpf_test {
memset(&dfe, 0, sizeof(dfe));
rv = 1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = -1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = (int32_t)TEST_FILL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_3;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}
diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index 13357b9..60fe96e 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -47,14 +47,14 @@ struct worker_params {
struct worker_params worker_params;
/* statics - all zero-initialized by default */
-static volatile int quit; /**< general quit variable for all threads */
-static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
-static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
-static volatile unsigned worker_idx;
-static volatile unsigned zero_idx;
+static volatile RTE_ATOMIC(int) quit; /**< general quit variable for all threads */
+static volatile RTE_ATOMIC(int) zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile RTE_ATOMIC(int) zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
+static volatile RTE_ATOMIC(unsigned int) zero_idx;
struct __rte_cache_aligned worker_stats {
- volatile unsigned handled_packets;
+ volatile RTE_ATOMIC(unsigned int) handled_packets;
};
struct worker_stats worker_stats[RTE_MAX_LCORE];
@@ -66,8 +66,8 @@ struct __rte_cache_aligned worker_stats {
{
unsigned i, count = 0;
for (i = 0; i < worker_idx; i++)
- count += __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED);
+ count += rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed);
return count;
}
@@ -77,8 +77,8 @@ struct __rte_cache_aligned worker_stats {
{
unsigned int i;
for (i = 0; i < RTE_MAX_LCORE; i++)
- __atomic_store_n(&worker_stats[i].handled_packets, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&worker_stats[i].handled_packets, 0,
+ rte_memory_order_relaxed);
}
/* this is the basic worker function for sanity test
@@ -91,17 +91,17 @@ struct __rte_cache_aligned worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -162,8 +162,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with all zero hashes done.\n");
/* pick two flows and check they go correctly */
@@ -189,9 +189,9 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(
+ rte_atomic_load_explicit(
&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_memory_order_relaxed));
printf("Sanity test with two hash values done\n");
}
@@ -218,8 +218,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with non-zero hashes done\n");
rte_mempool_put_bulk(p, (void *)bufs, BURST);
@@ -311,18 +311,18 @@ struct __rte_cache_aligned worker_stats {
struct rte_distributor *d = wp->dist;
unsigned int i;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
rte_pktmbuf_free(buf[i]);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(d, id, buf, num);
return 0;
}
@@ -381,51 +381,51 @@ struct __rte_cache_aligned worker_stats {
unsigned int num;
unsigned int zero_id = 0;
unsigned int zero_unset;
- const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
- __ATOMIC_RELAXED);
+ const unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
/* wait for quit single globally, or for worker zero, wait
* for zero_quit */
while (!quit && !(id == zero_id && zero_quit)) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
if (id == zero_id) {
rte_distributor_return_pkt(d, id, NULL, 0);
/* for worker zero, allow it to restart to pick up last packet
* when all workers are shutting down.
*/
- __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 1, rte_memory_order_release);
while (zero_quit)
usleep(100);
- __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 0, rte_memory_order_release);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets,
- num, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets,
+ num, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
}
@@ -491,17 +491,17 @@ struct __rte_cache_aligned worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST * 2) {
printf("Line %d: Error, not all packets flushed. "
@@ -560,18 +560,18 @@ struct __rte_cache_aligned worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
@@ -596,18 +596,18 @@ struct __rte_cache_aligned worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num, i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
*seq_field(buf[i]) += id + 1;
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -679,8 +679,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
/* Sort returned packets by sent order (sequence numbers). */
for (i = 0; i < buf_count; i++) {
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index c0ad39d..e678aec 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -31,7 +31,7 @@
/* static vars - zero initialized by default */
static volatile int quit;
-static volatile unsigned worker_idx;
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
struct __rte_cache_aligned worker_stats {
volatile unsigned handled_packets;
@@ -121,7 +121,7 @@ struct __rte_cache_aligned worker_stats {
struct rte_distributor *d = arg;
unsigned int num = 0;
int i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *buf[8];
for (i = 0; i < 8; i++)
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 9296de2..bae39af 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -53,12 +53,13 @@
#define MAX_LCORES (rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))
-static uint32_t obj_count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) obj_count;
+static RTE_ATOMIC(uint32_t) synchro;
#define WAIT_SYNCHRO_FOR_WORKERS() do { \
if (lcore_self != rte_get_main_lcore()) \
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, \
+ rte_memory_order_relaxed); \
} while(0)
/*
@@ -71,7 +72,8 @@
WAIT_SYNCHRO_FOR_WORKERS();
- __atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silent the check in the caller */
+ /* silent the check in the caller */
+ rte_atomic_store_explicit(&obj_count, 1, rte_memory_order_relaxed);
if (rte_eal_init(0, NULL) != -1)
return -1;
@@ -113,7 +115,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
if (rp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -178,7 +180,7 @@
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -244,7 +246,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_hash_create(&hash_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple times simultaneously */
@@ -311,7 +313,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_fbk_hash_create(&fbk_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -376,7 +378,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
if (lpm != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -437,8 +439,8 @@ struct test_case test_cases[] = {
if (pt_case->func == NULL)
return -1;
- __atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&obj_count, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -448,7 +450,7 @@ struct test_case test_cases[] = {
rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
}
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
@@ -463,7 +465,7 @@ struct test_case test_cases[] = {
pt_case->clean(lcore_id);
}
- count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
+ count = rte_atomic_load_explicit(&obj_count, rte_memory_order_relaxed);
if (count != 1) {
printf("%s: common object allocated %d times (should be 1)\n",
pt_case->name, count);
diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
index ed9dd41..33d3147 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/app/test/test_hash_multiwriter.c
@@ -43,8 +43,8 @@ struct {
const uint32_t nb_total_tsx_insertion = 4.5*1024*1024;
uint32_t rounded_nb_total_tsx_insertion;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
static int use_htm;
@@ -84,8 +84,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)
tbl_multiwriter_test_params.keys[i]
@@ -166,8 +166,8 @@ struct {
tbl_multiwriter_test_params.found = found;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
/* Get list of enabled cores */
i = 0;
@@ -233,8 +233,8 @@ struct {
printf("No key corrupted during multiwriter insertion.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED)/
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed)/
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf(" cycles per insertion: %llu\n", cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 4997a01..1867376 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -45,14 +45,14 @@ struct {
struct rte_hash *h;
} tbl_rw_test_param;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
-static uint64_t gread_cycles;
-static uint64_t gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
-static uint64_t greads;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrites;
static int
test_hash_readwrite_worker(__rte_unused void *arg)
@@ -110,8 +110,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_rw_test_param.num_insert; i++)
tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
@@ -209,8 +209,8 @@ struct {
int worker_cnt = rte_lcore_count() - 1;
uint32_t tot_insert = 0;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
if (init_params(use_ext, use_htm, use_rw_lf, use_jhash) != 0)
goto err;
@@ -269,8 +269,8 @@ struct {
printf("No key corrupted during read-write test.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
@@ -310,8 +310,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, i, rte_memory_order_relaxed);
return 0;
}
@@ -344,9 +344,9 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rw_test_param.num_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rw_test_param.num_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -369,11 +369,11 @@ struct {
uint64_t start = 0, end = 0;
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
if (init_params(0, use_htm, 0, use_jhash) != 0)
goto err;
@@ -430,10 +430,10 @@ struct {
if (tot_worker_lcore < core_cnt[n] * 2)
goto finish;
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -475,8 +475,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_only[n] = cycles_per_insertion;
printf("Reader only: cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -484,17 +484,17 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->write_only[n] = cycles_per_insertion;
printf("Writer only: cycles per writes: %llu\n",
cycles_per_insertion);
}
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -569,8 +569,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_write_r[n] = cycles_per_insertion;
printf("Read-write cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -578,8 +578,8 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->read_write_w[n] = cycles_per_insertion;
printf("Read-write cycles per writes: %llu\n",
cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite_lf_perf.c b/app/test/test_hash_readwrite_lf_perf.c
index 5d18850..4523985 100644
--- a/app/test/test_hash_readwrite_lf_perf.c
+++ b/app/test/test_hash_readwrite_lf_perf.c
@@ -86,10 +86,10 @@ struct rwc_perf {
struct rte_hash *h;
} tbl_rwc_test_param;
-static uint64_t gread_cycles;
-static uint64_t greads;
-static uint64_t gwrite_cycles;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gwrites;
static volatile uint8_t writer_done;
@@ -651,8 +651,8 @@ struct rwc_perf {
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, read_cnt*loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, read_cnt*loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -724,8 +724,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -742,8 +742,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_hit[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -791,8 +791,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -811,8 +811,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_miss[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -861,8 +861,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -884,8 +884,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_nsp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -935,8 +935,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -958,8 +958,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_sp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1007,8 +1007,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1030,8 +1030,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
}
@@ -1087,9 +1087,9 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n",
rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0,
+ rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1127,10 +1127,10 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles,
- __ATOMIC_RELAXED) /
- __atomic_load_n(&greads,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles,
+ rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads,
+ rte_memory_order_relaxed);
rwc_perf_results->multi_rw[m][k][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n",
@@ -1178,8 +1178,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
write_type = WRITE_NO_KEY_SHIFT;
@@ -1210,8 +1210,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_extbkt[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1280,9 +1280,9 @@ struct rwc_perf {
tbl_rwc_test_param.keys_no_ks + i);
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rwc_test_param.single_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -1328,8 +1328,8 @@ struct rwc_perf {
rwc_core_cnt[n];
printf("\nNumber of writers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
rte_rcu_qsbr_init(rv, RTE_MAX_LCORE);
@@ -1364,8 +1364,8 @@ struct rwc_perf {
rte_eal_mp_wait_lcore();
unsigned long long cycles_per_write_operation =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
rwc_perf_results->writer_add_del[n]
= cycles_per_write_operation;
printf("Cycles per write operation: %llu\n",
diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 3434a0d..bd5c0dd 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -10,6 +10,7 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_thread.h>
+#include <rte_stdatomic.h>
#include "test.h"
@@ -25,7 +26,7 @@ struct thread_context {
enum { Thread_INIT, Thread_ERROR, Thread_DONE } state;
bool lcore_id_any;
rte_thread_t id;
- unsigned int *registered_count;
+ RTE_ATOMIC(unsigned int) *registered_count;
};
static uint32_t thread_loop(void *arg)
@@ -49,10 +50,10 @@ static uint32_t thread_loop(void *arg)
t->state = Thread_ERROR;
}
/* Report register happened to the control thread. */
- __atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(t->registered_count, 1, rte_memory_order_release);
/* Wait for release from the control thread. */
- while (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(t->registered_count, rte_memory_order_acquire) != 0)
sched_yield();
rte_thread_unregister();
lcore_id = rte_lcore_id();
@@ -73,7 +74,7 @@ static uint32_t thread_loop(void *arg)
{
struct thread_context thread_contexts[RTE_MAX_LCORE];
unsigned int non_eal_threads_count;
- unsigned int registered_count;
+ RTE_ATOMIC(unsigned int) registered_count;
struct thread_context *t;
unsigned int i;
int ret;
@@ -93,7 +94,7 @@ static uint32_t thread_loop(void *arg)
}
printf("non-EAL threads count: %u\n", non_eal_threads_count);
/* Wait all non-EAL threads to register. */
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
@@ -109,14 +110,14 @@ static uint32_t thread_loop(void *arg)
if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
non_eal_threads_count++;
printf("non-EAL threads count: %u\n", non_eal_threads_count);
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
}
skip_lcore_any:
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -225,7 +226,7 @@ struct limit_lcore_context {
struct thread_context thread_contexts[2];
unsigned int non_eal_threads_count = 0;
struct limit_lcore_context l[2] = {};
- unsigned int registered_count = 0;
+ RTE_ATOMIC(unsigned int) registered_count = 0;
struct thread_context *t;
void *handle[2] = {};
unsigned int i;
@@ -275,7 +276,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 1 ||
@@ -298,7 +299,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 2 ||
@@ -315,7 +316,7 @@ struct limit_lcore_context {
}
rte_lcore_dump(stdout);
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -337,7 +338,7 @@ struct limit_lcore_context {
cleanup_threads:
/* Release all threads */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
rte_thread_join(t->id, NULL);
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
index 82daf9e..bc4bdde 100644
--- a/app/test/test_lpm_perf.c
+++ b/app/test/test_lpm_perf.c
@@ -22,8 +22,8 @@
struct rte_lpm *lpm;
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
-static volatile uint32_t thr_id;
-static uint64_t gwrite_cycles;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
static uint32_t num_writers;
/* LPM APIs are not thread safe, use spinlock */
@@ -362,7 +362,7 @@ static void generate_large_route_rule_table(void)
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -470,7 +470,7 @@ static void generate_large_route_rule_table(void)
total_cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, total_cycles, rte_memory_order_relaxed);
return 0;
@@ -540,9 +540,9 @@ static void generate_large_route_rule_table(void)
reader_f = test_lpm_reader;
writer_done = 0;
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
/* Launch reader threads */
for (i = j; i < num_cores; i++)
@@ -563,7 +563,7 @@ static void generate_large_route_rule_table(void)
printf("Total LPM Adds: %d\n", TOTAL_WRITES);
printf("Total LPM Deletes: %d\n", TOTAL_WRITES);
printf("Average LPM Add/Del: %"PRIu64" cycles\n",
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed)
/ TOTAL_WRITES);
writer_done = 1;
diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index 46ff13c..8fcbc11 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -42,7 +42,7 @@
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_mcslock_per_core(__rte_unused void *arg)
@@ -75,7 +75,7 @@
rte_mcslock_t ml_perf_me;
/* wait synchro */
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -100,14 +100,14 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lock = 1;
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
@@ -116,11 +116,11 @@
printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index a42a772..130d598 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -88,7 +88,7 @@
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
@@ -188,7 +188,8 @@ struct __rte_cache_aligned mempool_test_stats {
/* wait synchro for workers */
if (lcore_id != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
start_cycles = rte_get_timer_cycles();
@@ -233,7 +234,7 @@ struct __rte_cache_aligned mempool_test_stats {
int ret;
unsigned cores_save = cores;
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
/* reset stats */
memset(stats, 0, sizeof(stats));
@@ -258,7 +259,7 @@ struct __rte_cache_aligned mempool_test_stats {
}
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
ret = per_lcore_mempool_test(mp);
diff --git a/app/test/test_pflock.c b/app/test/test_pflock.c
index 5f77b15..d989a68 100644
--- a/app/test/test_pflock.c
+++ b/app/test/test_pflock.c
@@ -31,7 +31,7 @@
static rte_pflock_t sl;
static rte_pflock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_pflock_per_core(__rte_unused void *arg)
@@ -69,7 +69,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -99,7 +100,7 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -107,7 +108,7 @@
printf("\nTest with phase-fair lock on single core...\n");
lock = 1;
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -116,12 +117,12 @@
printf("\nPhase-fair test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 35fa068..995b0a6 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -537,7 +537,7 @@ enum {
return 0;
}
-static uint64_t start;
+static RTE_ATOMIC(uint64_t) start;
static inline int
poll_burst(void *args)
@@ -575,7 +575,7 @@ enum {
num[portid] = pkt_per_port;
}
- rte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_64((uint64_t *)(uintptr_t)&start, 1, rte_memory_order_acquire);
cur_tsc = rte_rdtsc();
while (total) {
@@ -629,9 +629,9 @@ enum {
/* only when polling first */
if (flags == SC_BURST_POLL_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_relaxed);
else
- __atomic_store_n(&start, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 0, rte_memory_order_relaxed);
/* start polling thread
* if in POLL_FIRST mode, poll once launched;
@@ -655,7 +655,7 @@ enum {
/* only when polling second */
if (flags == SC_BURST_XMIT_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_release);
/* wait for polling finished */
diff_tsc = rte_eal_wait_lcore(lcore);
diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index ce88a73..d1bf5c5 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -25,13 +25,15 @@
static uint32_t *hash_data[TOTAL_ENTRY];
static volatile uint8_t writer_done;
static volatile uint8_t all_registered;
-static volatile uint32_t thr_id;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
static struct rte_hash *h;
static char hash_name[8];
-static uint64_t updates, checks;
-static uint64_t update_cycles, check_cycles;
+static RTE_ATOMIC(uint64_t) updates;
+static RTE_ATOMIC(uint64_t) checks;
+static RTE_ATOMIC(uint64_t) update_cycles;
+static RTE_ATOMIC(uint64_t) check_cycles;
/* Scale down results to 1000 operations to support lower
* granularity clocks.
@@ -44,7 +46,7 @@
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -81,8 +83,8 @@
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
/* Make the thread offline */
rte_rcu_qsbr_thread_offline(t[0], thread_id);
@@ -113,8 +115,8 @@
} while (loop_cnt < 20000000);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -130,15 +132,15 @@
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
printf("\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\n",
num_cores - 1);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
if (all_registered == 1)
tmp_num_cores = num_cores - 1;
@@ -168,15 +170,16 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -193,10 +196,10 @@
size_t sz;
unsigned int i, tmp_num_cores;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf Test: %d Readers\n", num_cores);
@@ -220,11 +223,11 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -241,10 +244,10 @@
size_t sz;
unsigned int i;
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: %d Writers ('wait' in qsbr_check == false)\n",
num_cores);
@@ -266,10 +269,11 @@
/* Wait until all readers have exited */
rte_eal_mp_wait_lcore();
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -317,8 +321,8 @@
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
rte_rcu_qsbr_thread_unregister(temp, thread_id);
@@ -389,12 +393,12 @@ static struct rte_hash *init_hash(void)
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\n", num_cores);
@@ -453,8 +457,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
@@ -467,12 +471,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -511,7 +515,7 @@ static struct rte_hash *init_hash(void)
printf("Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\n", num_cores);
- __atomic_store_n(&thr_id, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_relaxed);
if (all_registered == 1)
tmp_num_cores = num_cores;
@@ -570,8 +574,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
/* Wait and check return value from reader threads */
@@ -583,12 +587,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -622,10 +626,10 @@ static struct rte_hash *init_hash(void)
return TEST_SKIPPED;
}
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
num_cores = 0;
RTE_LCORE_FOREACH_WORKER(core_id) {
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index d7c5a4c..6d7a0a8 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -186,7 +186,7 @@ struct thread_params {
void *burst = NULL;
#ifdef RTE_USE_C11_MEM_MODEL
- if (__atomic_fetch_add(&lcore_count, 1, __ATOMIC_RELAXED) + 1 != 2)
+ if (rte_atomic_fetch_add_explicit(&lcore_count, 1, rte_memory_order_relaxed) + 1 != 2)
#else
if (__sync_add_and_fetch(&lcore_count, 1) != 2)
#endif
@@ -320,7 +320,7 @@ struct thread_params {
return 0;
}
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static uint64_t queue_count[RTE_MAX_LCORE];
#define TIME_MS 100
@@ -342,7 +342,8 @@ struct thread_params {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (time_diff < hz * TIME_MS / 1000) {
@@ -397,12 +398,12 @@ struct thread_params {
param.r = r;
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lcore_f(&param);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_ring_stress_impl.h b/app/test/test_ring_stress_impl.h
index 202d47d..8b0bfb1 100644
--- a/app/test/test_ring_stress_impl.h
+++ b/app/test/test_ring_stress_impl.h
@@ -24,7 +24,7 @@ enum {
WRK_CMD_RUN,
};
-static alignas(RTE_CACHE_LINE_SIZE) uint32_t wrk_cmd = WRK_CMD_STOP;
+static alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) wrk_cmd = WRK_CMD_STOP;
/* test run-time in seconds */
static const uint32_t run_time = 60;
@@ -203,7 +203,7 @@ struct __rte_cache_aligned ring_elem {
* really releasing any data through 'wrk_cmd' to
* the worker.
*/
- while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) != WRK_CMD_RUN)
+ while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) != WRK_CMD_RUN)
rte_pause();
cl = rte_rdtsc_precise();
@@ -246,7 +246,7 @@ struct __rte_cache_aligned ring_elem {
lcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);
- } while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) == WRK_CMD_RUN);
+ } while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) == WRK_CMD_RUN);
cl = rte_rdtsc_precise() - cl;
if (prcs == 0)
@@ -360,12 +360,12 @@ struct __rte_cache_aligned ring_elem {
}
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_RUN, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_RUN, rte_memory_order_release);
rte_delay_us(run_time * US_PER_S);
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_STOP, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_STOP, rte_memory_order_release);
/* wait for workers and collect stats. */
mc = rte_lcore_id();
diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index e8767da..6777b91 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -35,7 +35,7 @@
static rte_rwlock_t sl;
static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
enum {
LC_TYPE_RDLOCK,
@@ -101,7 +101,8 @@ struct __rte_cache_aligned try_rwlock_lcore {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -134,12 +135,12 @@ struct __rte_cache_aligned try_rwlock_lcore {
printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(NULL);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c
index bab8b0f..c5e5e64 100644
--- a/app/test/test_seqlock.c
+++ b/app/test/test_seqlock.c
@@ -22,7 +22,7 @@ struct __rte_cache_aligned data {
struct reader {
struct data *data;
- uint8_t stop;
+ RTE_ATOMIC(uint8_t) stop;
};
#define WRITER_RUNTIME 2.0 /* s */
@@ -79,7 +79,7 @@ struct reader {
struct reader *r = arg;
int rc = TEST_SUCCESS;
- while (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&
+ while (rte_atomic_load_explicit(&r->stop, rte_memory_order_relaxed) == 0 &&
rc == TEST_SUCCESS) {
struct data *data = r->data;
bool interrupted;
@@ -115,7 +115,7 @@ struct reader {
static void
reader_stop(struct reader *reader)
{
- __atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&reader->stop, 1, rte_memory_order_relaxed);
}
#define NUM_WRITERS 2 /* main lcore + one worker */
diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index c12d52d..010ab82 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -59,15 +59,15 @@ static int32_t dummy_mt_unsafe_cb(void *args)
* test, because two threads are concurrently in a non-MT safe callback.
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so clear pass flag */
*pass_test = 0;
@@ -86,15 +86,15 @@ static int32_t dummy_mt_safe_cb(void *args)
* that 2 threads are running the callback at the same time: MT safe
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so set pass flag */
*pass_test = 1;
@@ -748,15 +748,15 @@ static int32_t dummy_mt_safe_cb(void *args)
/* retrieve done flag and lock to add/sub */
uint32_t *done = &params[0];
- uint32_t *lock = &params[1];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&params[1];
while (!*done) {
- __atomic_fetch_add(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(lock, 1, rte_memory_order_relaxed);
rte_delay_us(500);
- if (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)
+ if (rte_atomic_load_explicit(lock, rte_memory_order_relaxed) > 1)
/* pass: second core has simultaneously incremented */
*done = 1;
- __atomic_fetch_sub(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(lock, 1, rte_memory_order_relaxed);
}
return 0;
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 9a481f2..a29405a 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -48,7 +48,7 @@
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_spinlock_per_core(__rte_unused void *arg)
@@ -110,7 +110,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -149,11 +150,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c
index c5e1caa..3f17a26 100644
--- a/app/test/test_stack_perf.c
+++ b/app/test/test_stack_perf.c
@@ -23,7 +23,7 @@
*/
static volatile unsigned int bulk_sizes[] = {8, MAX_BURST};
-static uint32_t lcore_barrier;
+static RTE_ATOMIC(uint32_t) lcore_barrier;
struct lcore_pair {
unsigned int c1;
@@ -143,8 +143,8 @@ struct thread_args {
s = args->s;
size = args->sz;
- __atomic_fetch_sub(&lcore_barrier, 1, __ATOMIC_RELAXED);
- rte_wait_until_equal_32(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&lcore_barrier, 1, rte_memory_order_relaxed);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&lcore_barrier, 0, rte_memory_order_relaxed);
uint64_t start = rte_rdtsc();
@@ -173,7 +173,7 @@ struct thread_args {
unsigned int i;
for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
- __atomic_store_n(&lcore_barrier, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 2, rte_memory_order_relaxed);
args[0].sz = args[1].sz = bulk_sizes[i];
args[0].s = args[1].s = s;
@@ -206,7 +206,7 @@ struct thread_args {
int cnt = 0;
double avg;
- __atomic_store_n(&lcore_barrier, n, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, n, rte_memory_order_relaxed);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
@@ -300,7 +300,7 @@ struct thread_args {
struct lcore_pair cores;
struct rte_stack *s;
- __atomic_store_n(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 0, rte_memory_order_relaxed);
s = rte_stack_create(STACK_NAME, STACK_SIZE, rte_socket_id(), flags);
if (s == NULL) {
diff --git a/app/test/test_threads.c b/app/test/test_threads.c
index 4ac3f26..6d6881a 100644
--- a/app/test/test_threads.c
+++ b/app/test/test_threads.c
@@ -6,12 +6,13 @@
#include <rte_thread.h>
#include <rte_debug.h>
+#include <rte_stdatomic.h>
#include "test.h"
RTE_LOG_REGISTER(threads_logtype_test, test.threads, INFO);
-static uint32_t thread_id_ready;
+static RTE_ATOMIC(uint32_t) thread_id_ready;
static uint32_t
thread_main(void *arg)
@@ -19,9 +20,9 @@
if (arg != NULL)
*(rte_thread_t *)arg = rte_thread_self();
- __atomic_store_n(&thread_id_ready, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 1, rte_memory_order_release);
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 1)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 1)
;
return 0;
@@ -37,13 +38,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
@@ -61,13 +62,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main,
&thread_main_id) == 0, "Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_detach(thread_id) == 0,
"Failed to detach thread.");
@@ -85,7 +86,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
priority = RTE_THREAD_PRIORITY_NORMAL;
@@ -121,7 +122,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Priority set mismatches priority get");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -137,7 +138,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset0) == 0,
@@ -190,7 +191,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes affinity thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset1) == 0,
@@ -198,7 +199,7 @@
RTE_TEST_ASSERT(memcmp(&cpuset0, &cpuset1, sizeof(rte_cpuset_t)) == 0,
"Failed to apply affinity attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -219,7 +220,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes priority thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_priority(thread_id, &priority) == 0,
@@ -227,7 +228,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Failed to apply priority attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -243,13 +244,13 @@
thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 7a6cb4c..ad4a2d8 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -48,7 +48,7 @@
static rte_ticketlock_recursive_t tlr;
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_ticketlock_per_core(__rte_unused void *arg)
@@ -111,7 +111,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcore_count[lcore] < MAX_LOOP) {
@@ -153,11 +154,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index cac8fc0..dc15a80 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -202,7 +202,7 @@ struct mytimerinfo {
/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
-static uint16_t lcore_state[RTE_MAX_LCORE];
+static RTE_ATOMIC(uint16_t) lcore_state[RTE_MAX_LCORE];
static void
main_init_workers(void)
@@ -210,7 +210,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_WAITING, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_WAITING,
+ rte_memory_order_relaxed);
}
}
@@ -220,10 +221,12 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_RUN_SIGNAL, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL,
+ rte_memory_order_release);
}
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_RUNNING,
+ rte_memory_order_acquire);
}
}
@@ -233,7 +236,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_FINISHED,
+ rte_memory_order_acquire);
}
}
@@ -242,8 +246,10 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- rte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, __ATOMIC_ACQUIRE);
- __atomic_store_n(&lcore_state[lcore_id], WORKER_RUNNING, __ATOMIC_RELEASE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[lcore_id], WORKER_RUN_SIGNAL,
+ rte_memory_order_acquire);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING,
+ rte_memory_order_release);
}
static void
@@ -251,7 +257,8 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- __atomic_store_n(&lcore_state[lcore_id], WORKER_FINISHED, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED,
+ rte_memory_order_release);
}
@@ -277,12 +284,12 @@ struct mytimerinfo {
unsigned int lcore_id = rte_lcore_id();
unsigned int main_lcore = rte_get_main_lcore();
int32_t my_collisions = 0;
- static uint32_t collisions;
+ static RTE_ATOMIC(uint32_t) collisions;
if (lcore_id == main_lcore) {
cb_count = 0;
test_failed = 0;
- __atomic_store_n(&collisions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&collisions, 0, rte_memory_order_relaxed);
timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
if (timers == NULL) {
printf("Test Failed\n");
@@ -310,7 +317,7 @@ struct mytimerinfo {
my_collisions++;
}
if (my_collisions != 0)
- __atomic_fetch_add(&collisions, my_collisions, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&collisions, my_collisions, rte_memory_order_relaxed);
/* wait long enough for timers to expire */
rte_delay_ms(100);
@@ -324,7 +331,7 @@ struct mytimerinfo {
/* now check that we get the right number of callbacks */
if (lcore_id == main_lcore) {
- my_collisions = __atomic_load_n(&collisions, __ATOMIC_RELAXED);
+ my_collisions = rte_atomic_load_explicit(&collisions, rte_memory_order_relaxed);
if (my_collisions != 0)
printf("- %d timer reset collisions (OK)\n", my_collisions);
rte_timer_manage();
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 42/45] app/test-eventdev: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (40 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 41/45] app/test: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 43/45] app/test-crypto-perf: " Tyler Retzlaff
` (2 subsequent siblings)
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
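For reference, the conversion applied to the outstand_pkts counter boils down to the
sketch below (the counter name matches test_order_common.h; the wrapper function and
its name are illustrative only, not part of the patch):

#include <stdint.h>
#include <rte_stdatomic.h>

/* counter declared with the RTE_ATOMIC() type specifier */
static RTE_ATOMIC(uint64_t) outstand_pkts;

static inline int64_t
order_outstanding_dec(void)
{
	/* was: __atomic_fetch_sub(&outstand_pkts, 1, __ATOMIC_RELAXED) */
	rte_atomic_fetch_sub_explicit(&outstand_pkts, 1, rte_memory_order_relaxed);
	/* was: __atomic_load_n(&outstand_pkts, __ATOMIC_RELAXED) */
	return (int64_t)rte_atomic_load_explicit(&outstand_pkts, rte_memory_order_relaxed);
}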
app/test-eventdev/test_order_atq.c | 4 ++--
app/test-eventdev/test_order_common.c | 5 +++--
app/test-eventdev/test_order_common.h | 8 ++++----
app/test-eventdev/test_order_queue.c | 4 ++--
app/test-eventdev/test_perf_common.h | 6 +++---
5 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 2fee4b4..128d3f2 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index a9894c6..0fceace 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -189,7 +189,7 @@
evt_err("failed to allocate t->expected_flow_seq memory");
goto exp_nomem;
}
- __atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts, rte_memory_order_relaxed);
t->err = false;
t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
@@ -296,7 +296,8 @@
while (t->err == false) {
uint64_t new_cycles = rte_get_timer_cycles();
- int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
+ int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
+ rte_memory_order_relaxed);
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index d4cbc5c..7177fd8 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -48,7 +48,7 @@ struct __rte_cache_aligned test_order {
* The atomic_* is an expensive operation,Since it is a functional test,
* We are using the atomic_ operation to reduce the code complexity.
*/
- uint64_t outstand_pkts;
+ RTE_ATOMIC(uint64_t) outstand_pkts;
enum evt_test_result result;
uint32_t nb_flows;
uint64_t nb_pkts;
@@ -95,7 +95,7 @@ struct __rte_cache_aligned test_order {
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
- uint64_t *const outstand_pkts)
+ RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
/* compare the seqn against expected value */
@@ -113,7 +113,7 @@ struct __rte_cache_aligned test_order {
*/
expected_flow_seq[flow]++;
rte_pktmbuf_free(ev->mbuf);
- __atomic_fetch_sub(outstand_pkts, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -132,7 +132,7 @@ struct __rte_cache_aligned test_order {
const uint8_t port = w->port_id;\
const uint32_t nb_flows = t->nb_flows;\
uint32_t *expected_flow_seq = t->expected_flow_seq;\
- uint64_t *outstand_pkts = &t->outstand_pkts;\
+ RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
if (opt->verbose_level > 1)\
printf("%s(): lcore %d dev_id %d port=%d\n",\
__func__, rte_lcore_id(), dev_id, port)
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 80eaea5..a282ab2 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index bc627de..d60b873 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -225,7 +225,7 @@ struct __rte_cache_aligned perf_elt {
* stored before updating the number of
* processed packets for worker lcores
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -270,7 +270,7 @@ struct __rte_cache_aligned perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -325,7 +325,7 @@ struct __rte_cache_aligned perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts += vec->nb_elem;
if (enable_fwd_latency) {
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 43/45] app/test-crypto-perf: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (41 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 42/45] app/test-eventdev: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 44/45] app/test-compress-perf: " Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 45/45] app/test-bbdev: " Tyler Retzlaff
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
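The display_once changes all follow one pattern: the builtin's trailing weak flag
disappears (the strength is now encoded in the function name) and the success/failure
orders become rte_memory_order values. A minimal sketch, with an illustrative header
string standing in for the ones these tests print:

#include <stdio.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) display_once;

static void
print_header_once(void)
{
	uint16_t exp = 0;

	/* was: __atomic_compare_exchange_n(&display_once, &exp, 1, 0,
	 *	__ATOMIC_RELAXED, __ATOMIC_RELAXED)
	 */
	if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed))
		printf("# lcore id, Buf Size, Burst Size, ...\n");
}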
app/test-crypto-perf/cperf_test_latency.c | 6 +++---
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +++++-----
app/test-crypto-perf/cperf_test_throughput.c | 10 +++++-----
app/test-crypto-perf/cperf_test_verify.c | 10 +++++-----
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 99b7d7c..b8ad6bf 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -136,7 +136,7 @@ struct priv_op_data {
uint32_t imix_idx = 0;
int ret = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
if (ctx == NULL)
return 0;
@@ -341,8 +341,8 @@ struct priv_op_data {
uint16_t exp = 0;
if (ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
"cycles, time (us)");
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 4a60f6d..7191d99 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -396,7 +396,7 @@ struct pmd_cyclecount_state {
state.lcore = rte_lcore_id();
state.linearize = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
static bool warmup = true;
/*
@@ -443,8 +443,8 @@ struct pmd_cyclecount_state {
uint16_t exp = 0;
if (!opts->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
@@ -460,8 +460,8 @@ struct pmd_cyclecount_state {
state.cycles_per_enq,
state.cycles_per_deq);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index e3d266d..c0891e7 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -107,7 +107,7 @@ struct cperf_throughput_ctx {
uint8_t burst_size_idx = 0;
uint32_t imix_idx = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
@@ -277,8 +277,8 @@ struct cperf_throughput_ctx {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst Size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -298,8 +298,8 @@ struct cperf_throughput_ctx {
throughput_gbps,
cycles_per_packet);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 3548509..222c7a1 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -216,7 +216,7 @@ struct cperf_op_result {
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
uint64_t ops_failed = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint64_t i;
uint16_t ops_unused = 0;
@@ -370,8 +370,8 @@ struct cperf_op_result {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -388,8 +388,8 @@ struct cperf_op_result {
ops_deqd_failed,
ops_failed);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore id, Buffer Size(B), "
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Failed Ops\n");
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 44/45] app/test-compress-perf: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (42 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 43/45] app/test-crypto-perf: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
2024-04-19 23:06 ` [PATCH v4 45/45] app/test-bbdev: " Tyler Retzlaff
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
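The struct change is what lets the call sites build in both modes: with
RTE_ENABLE_STDATOMIC the RTE_ATOMIC() specifier expands to a C11 _Atomic-qualified
type, otherwise the field stays a plain uint16_t and the rte_atomic_* macros fall
back to the __atomic_* builtins. A trimmed sketch of the field and one caller
(member names mirror the patch, the function is illustrative):

#include <stdint.h>
#include <rte_stdatomic.h>

struct cperf_mem_resources {
	uint16_t qp_id;
	uint8_t lcore_id;
	/* accessed only through the rte_atomic_* accessors */
	RTE_ATOMIC(uint16_t) print_info_once;
};

/* returns non-zero exactly once per cperf_mem_resources instance */
static int
cperf_claim_print_once(struct cperf_mem_resources *mem)
{
	uint16_t exp = 0;

	return rte_atomic_compare_exchange_strong_explicit(&mem->print_info_once,
			&exp, 1, rte_memory_order_relaxed, rte_memory_order_relaxed);
}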
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 ++--
app/test-compress-perf/comp_perf_test_throughput.c | 10 +++++-----
app/test-compress-perf/comp_perf_test_verify.c | 6 +++---
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/app/test-compress-perf/comp_perf_test_common.h b/app/test-compress-perf/comp_perf_test_common.h
index d039e5a..085e269 100644
--- a/app/test-compress-perf/comp_perf_test_common.h
+++ b/app/test-compress-perf/comp_perf_test_common.h
@@ -14,7 +14,7 @@ struct cperf_mem_resources {
uint16_t qp_id;
uint8_t lcore_id;
- uint16_t print_info_once;
+ RTE_ATOMIC(uint16_t) print_info_once;
uint32_t total_bufs;
uint8_t *compressed_data;
diff --git a/app/test-compress-perf/comp_perf_test_cyclecount.c b/app/test-compress-perf/comp_perf_test_cyclecount.c
index 4d336ec..64e8faa 100644
--- a/app/test-compress-perf/comp_perf_test_cyclecount.c
+++ b/app/test-compress-perf/comp_perf_test_cyclecount.c
@@ -498,8 +498,8 @@ struct cperf_cyclecount_ctx {
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
diff --git a/app/test-compress-perf/comp_perf_test_throughput.c b/app/test-compress-perf/comp_perf_test_throughput.c
index 1f7072d..089d19c 100644
--- a/app/test-compress-perf/comp_perf_test_throughput.c
+++ b/app/test-compress-perf/comp_perf_test_throughput.c
@@ -336,7 +336,7 @@
struct cperf_benchmark_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->ver.options;
uint32_t lcore = rte_lcore_id();
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
int i, ret = EXIT_SUCCESS;
ctx->ver.mem.lcore_id = lcore;
@@ -345,8 +345,8 @@
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
@@ -413,8 +413,8 @@
}
exp = 0;
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("\n%12s%6s%12s%17s%15s%16s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]",
"Comp [Gbps]", "Decomp [Gbps]");
diff --git a/app/test-compress-perf/comp_perf_test_verify.c b/app/test-compress-perf/comp_perf_test_verify.c
index 7bd1807..09d97c5 100644
--- a/app/test-compress-perf/comp_perf_test_verify.c
+++ b/app/test-compress-perf/comp_perf_test_verify.c
@@ -396,7 +396,7 @@
struct cperf_verify_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->options;
int ret = EXIT_SUCCESS;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint32_t lcore = rte_lcore_id();
uint16_t exp = 0;
@@ -452,8 +452,8 @@
test_data->input_data_sz * 100;
if (!ctx->silent) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("%12s%6s%12s%17s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]");
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v4 45/45] app/test-bbdev: use rte stdatomic API
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
` (43 preceding siblings ...)
2024-04-19 23:06 ` [PATCH v4 44/45] app/test-compress-perf: " Tyler Retzlaff
@ 2024-04-19 23:06 ` Tyler Retzlaff
44 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-04-19 23:06 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Yuying Zhang,
Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
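One detail worth calling out is the cast at the rte_wait_until_equal_16() call sites:
that helper still takes a plain uint16_t pointer, so the RTE_ATOMIC()-qualified
sync/nb_dequeued fields are cast through uintptr_t to drop the qualifier. A minimal
sketch of the wait/reset pair (the flag name and the value 1 are illustrative; the
patch waits on SYNC_START and the enqueued count):

#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_pause.h>

static RTE_ATOMIC(uint16_t) sync_flag;

static void
wait_for_start(void)
{
	/* rte_wait_until_equal_16() is not RTE_ATOMIC-aware, hence the cast */
	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&sync_flag, 1,
			rte_memory_order_relaxed);
	/* reset for the next round */
	rte_atomic_store_explicit(&sync_flag, 0, rte_memory_order_relaxed);
}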
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++++++++++++----------------
1 file changed, 110 insertions(+), 73 deletions(-)
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index dcce00a..9694ed3 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -144,7 +144,7 @@ struct test_op_params {
uint16_t num_to_process;
uint16_t num_lcores;
int vector_mask;
- uint16_t sync;
+ RTE_ATOMIC(uint16_t) sync;
struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
@@ -159,9 +159,9 @@ struct thread_params {
uint8_t iter_count;
double iter_average;
double bler;
- uint16_t nb_dequeued;
- int16_t processing_status;
- uint16_t burst_sz;
+ RTE_ATOMIC(uint16_t) nb_dequeued;
+ RTE_ATOMIC(int16_t) processing_status;
+ RTE_ATOMIC(uint16_t) burst_sz;
struct test_op_params *op_params;
struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
@@ -3195,56 +3195,64 @@ typedef int (test_case_function)(struct active_device *ad,
}
if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
printf(
"Dequeue interrupt handler called for incorrect event!\n");
return;
}
- burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);
+ burst_sz = rte_atomic_load_explicit(&tp->burst_sz, rte_memory_order_relaxed);
num_ops = tp->op_params->num_to_process;
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
deq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,
&tp->fft_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
deq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
&tp->mldts_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else /*RTE_BBDEV_OP_TURBO_ENC*/
deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
if (deq < burst_sz) {
printf(
"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
burst_sz, deq);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
- if (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) + deq < num_ops) {
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
return;
}
@@ -3288,7 +3296,8 @@ typedef int (test_case_function)(struct active_device *ad,
if (ret) {
printf("Buffers validation failed\n");
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
}
switch (test_vector.op_type) {
@@ -3315,7 +3324,8 @@ typedef int (test_case_function)(struct active_device *ad,
break;
default:
printf("Unknown op type: %d\n", test_vector.op_type);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
@@ -3324,7 +3334,7 @@ typedef int (test_case_function)(struct active_device *ad,
tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
((double)total_time / (double)rte_get_tsc_hz());
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
}
static int
@@ -3362,10 +3372,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3415,15 +3426,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3459,10 +3472,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3506,15 +3520,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3549,10 +3565,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3592,15 +3609,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3636,10 +3655,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3681,15 +3701,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3725,10 +3747,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3769,15 +3792,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3811,10 +3836,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_to_process);
@@ -3851,15 +3877,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3894,7 +3922,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4013,7 +4042,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4148,7 +4178,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4271,7 +4302,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4402,7 +4434,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4503,7 +4536,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4604,7 +4638,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4702,7 +4737,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4898,7 +4934,7 @@ typedef int (test_case_function)(struct active_device *ad,
else
return TEST_SKIPPED;
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -4921,7 +4957,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = bler_function(&t_params[0]);
/* Main core is always used */
@@ -5024,7 +5060,7 @@ typedef int (test_case_function)(struct active_device *ad,
throughput_function = throughput_pmd_lcore_enc;
}
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -5047,7 +5083,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = throughput_function(&t_params[0]);
/* Main core is always used */
@@ -5077,29 +5113,30 @@ typedef int (test_case_function)(struct active_device *ad,
* Wait for main lcore operations.
*/
tp = &t_params[0];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+ (rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed) !=
TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed);
/* Wait for worker lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp = &t_params[used_cores];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
- TEST_FAILED))
+ (rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed) != TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed);
}
/* Print throughput if test passed */
--
1.8.3.1
* [PATCH v5 00/45] use stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (49 preceding siblings ...)
2024-04-19 23:05 ` [PATCH v4 " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 01/45] net/mlx5: use rte " Tyler Retzlaff
` (45 more replies)
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
51 siblings, 46 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
This series converts all non-generic builtin atomics to use the rte_atomic
macros that allow optional enablement of standard C11 atomics.
Use of generic atomics for non-scalar types is not converted in this
change and will be evaluated as part of a separate series.
Specifically, conversion of lib/lpm and drivers/x/cnxk will be addressed
in a separate series covering the use of generics.
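As an illustration of the pattern only (the variable and helper names below
are invented, not taken from any patch in this series), a converted scalar
typically gains an RTE_ATOMIC() type qualifier, and each gcc built-in call is
replaced by the matching rte_atomic_*_explicit() call taking an
rte_memory_order_* argument:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* hypothetical reference counter, for illustration only */
    static RTE_ATOMIC(uint32_t) refcnt;

    static inline void
    refcnt_get(void)
    {
        /* was: __atomic_fetch_add(&refcnt, 1, __ATOMIC_RELAXED); */
        rte_atomic_fetch_add_explicit(&refcnt, 1, rte_memory_order_relaxed);
    }

    static inline bool
    refcnt_put(void)
    {
        /* was: __atomic_fetch_sub(&refcnt, 1, __ATOMIC_RELAXED) - 1 == 0 */
        return rte_atomic_fetch_sub_explicit(&refcnt, 1,
                rte_memory_order_relaxed) - 1 == 0;
    }

The mapping is mechanical: __ATOMIC_RELAXED, __ATOMIC_ACQUIRE and friends
become rte_memory_order_relaxed, rte_memory_order_acquire and so on, while
argument and result types stay the same.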
v5:
* add missing RTE_ATOMIC for ``struct channel_info.status`` field.
v4:
* rebase after merge of the change moving the alignment attribute onto
types for MSVC; no other changes.
v3:
* event/dsw, wrap all lines <= 80 chars, align arguments to
opening parenthesis.
* event/dlb2, wrap changed lines <= 80 chars, remove comments
referencing gcc __atomic built-ins.
* bus/vmbus, remove comment referencing gcc atomic built-ins,
fix mistake where monitor_mask was declared RTE_ATOMIC(uint32_t),
fix mistake where pending was not declared RTE_ATOMIC(uint32_t),
remove the now unnecessary cast to __rte_atomic of pending (since
the field is now properly declared RTE_ATOMIC).
v2:
* drop the net/sfc driver from the series. The sfc driver
uses the generic __atomic_store, which is not handled by the current
macros. The cases where generic __atomic_xxx are used on objects that
cannot be accepted by __atomic_xxx_n will be addressed in a
separate series; a sketch of the scalar vs. non-scalar distinction
follows these notes.
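For context, a minimal sketch of that scalar vs. non-scalar distinction
(structure and field names are made up for illustration; they are not taken
from the sfc or vmbus code): a field declared with RTE_ATOMIC() can be passed
directly to the rte_atomic_*_explicit() macros, while a whole structure
updated through the generic __atomic_store has no direct equivalent and is
left as-is for now.

    #include <stdint.h>
    #include <rte_stdatomic.h>

    struct chan_info {                    /* hypothetical example */
        RTE_ATOMIC(uint32_t) pending;     /* scalar field, convertible */
    };

    struct stats_pair {                   /* hypothetical 16-byte object */
        uint64_t packets;
        uint64_t bytes;
    };

    static inline uint32_t
    chan_pending(struct chan_info *ci)
    {
        /* no __rte_atomic cast needed once the field carries RTE_ATOMIC() */
        return rte_atomic_load_explicit(&ci->pending, rte_memory_order_relaxed);
    }

    static inline void
    stats_store(struct stats_pair *dst, struct stats_pair *src)
    {
        /* generic built-in on a non-scalar object: out of scope here */
        __atomic_store(dst, src, __ATOMIC_RELAXED);
    }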
Tyler Retzlaff (45):
net/mlx5: use rte stdatomic API
net/ixgbe: use rte stdatomic API
net/iavf: use rte stdatomic API
net/ice: use rte stdatomic API
net/i40e: use rte stdatomic API
net/hns3: use rte stdatomic API
net/bnxt: use rte stdatomic API
net/cpfl: use rte stdatomic API
net/af_xdp: use rte stdatomic API
net/octeon_ep: use rte stdatomic API
net/octeontx: use rte stdatomic API
net/cxgbe: use rte stdatomic API
net/gve: use rte stdatomic API
net/memif: use rte stdatomic API
net/thunderx: use rte stdatomic API
net/virtio: use rte stdatomic API
net/hinic: use rte stdatomic API
net/idpf: use rte stdatomic API
net/qede: use rte stdatomic API
net/ring: use rte stdatomic API
vdpa/mlx5: use rte stdatomic API
raw/ifpga: use rte stdatomic API
event/opdl: use rte stdatomic API
event/octeontx: use rte stdatomic API
event/dsw: use rte stdatomic API
dma/skeleton: use rte stdatomic API
crypto/octeontx: use rte stdatomic API
common/mlx5: use rte stdatomic API
common/idpf: use rte stdatomic API
common/iavf: use rte stdatomic API
baseband/acc: use rte stdatomic API
net/txgbe: use rte stdatomic API
net/null: use rte stdatomic API
event/dlb2: use rte stdatomic API
dma/idxd: use rte stdatomic API
crypto/ccp: use rte stdatomic API
common/cpt: use rte stdatomic API
bus/vmbus: use rte stdatomic API
examples: use rte stdatomic API
app/dumpcap: use rte stdatomic API
app/test: use rte stdatomic API
app/test-eventdev: use rte stdatomic API
app/test-crypto-perf: use rte stdatomic API
app/test-compress-perf: use rte stdatomic API
app/test-bbdev: use rte stdatomic API
app/dumpcap/main.c | 12 +-
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++--------
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 +-
app/test-compress-perf/comp_perf_test_throughput.c | 10 +-
app/test-compress-perf/comp_perf_test_verify.c | 6 +-
app/test-crypto-perf/cperf_test_latency.c | 6 +-
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +-
app/test-crypto-perf/cperf_test_throughput.c | 10 +-
app/test-crypto-perf/cperf_test_verify.c | 10 +-
app/test-eventdev/test_order_atq.c | 4 +-
app/test-eventdev/test_order_common.c | 5 +-
app/test-eventdev/test_order_common.h | 8 +-
app/test-eventdev/test_order_queue.c | 4 +-
app/test-eventdev/test_perf_common.h | 6 +-
app/test/test_bpf.c | 46 ++++--
app/test/test_distributor.c | 114 ++++++-------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++--
app/test/test_hash_multiwriter.c | 16 +-
app/test/test_hash_readwrite.c | 74 ++++-----
app/test/test_hash_readwrite_lf_perf.c | 88 +++++-----
app/test/test_lcores.c | 25 +--
app/test/test_lpm_perf.c | 14 +-
app/test/test_mcslock.c | 12 +-
app/test/test_mempool_perf.c | 9 +-
app/test/test_pflock.c | 13 +-
app/test/test_pmd_perf.c | 10 +-
app/test/test_rcu_qsbr_perf.c | 114 ++++++-------
app/test/test_ring_perf.c | 11 +-
app/test/test_ring_stress_impl.h | 10 +-
app/test/test_rwlock.c | 9 +-
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +--
app/test/test_spinlock.c | 9 +-
app/test/test_stack_perf.c | 12 +-
app/test/test_threads.c | 33 ++--
app/test/test_ticketlock.c | 9 +-
app/test/test_timer.c | 31 ++--
drivers/baseband/acc/rte_acc100_pmd.c | 36 ++--
drivers/baseband/acc/rte_vrb_pmd.c | 46 ++++--
drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
drivers/bus/vmbus/vmbus_channel.c | 8 +-
drivers/common/cpt/cpt_common.h | 2 +-
drivers/common/iavf/iavf_impl.c | 4 +-
drivers/common/idpf/idpf_common_device.h | 6 +-
drivers/common/idpf/idpf_common_rxtx.c | 14 +-
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 +-
drivers/common/mlx5/linux/mlx5_nl.c | 5 +-
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 +-
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 ++--
drivers/common/mlx5/mlx5_common_utils.h | 6 +-
drivers/common/mlx5/mlx5_malloc.c | 58 +++----
drivers/crypto/ccp/ccp_dev.c | 8 +-
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 +-
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +-
drivers/dma/skeleton/skeleton_dmadev.c | 5 +-
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
drivers/event/dlb2/dlb2.c | 34 ++--
drivers/event/dlb2/dlb2_priv.h | 13 +-
drivers/event/dlb2/dlb2_xstats.c | 2 +-
drivers/event/dsw/dsw_evdev.h | 6 +-
drivers/event/dsw/dsw_event.c | 47 ++++--
drivers/event/dsw/dsw_xstats.c | 4 +-
drivers/event/octeontx/timvf_evdev.h | 8 +-
drivers/event/octeontx/timvf_worker.h | 36 ++--
drivers/event/opdl/opdl_ring.c | 80 ++++-----
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 ++-
drivers/net/bnxt/bnxt_cpr.h | 4 +-
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 +-
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 +-
drivers/net/cpfl/cpfl_ethdev.c | 8 +-
drivers/net/cxgbe/clip_tbl.c | 12 +-
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 +--
drivers/net/cxgbe/cxgbe_ofld.h | 6 +-
drivers/net/cxgbe/l2t.c | 12 +-
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +--
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 +-
drivers/net/cxgbe/smt.h | 2 +-
drivers/net/gve/base/gve_osdep.h | 4 +-
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
drivers/net/hns3/hns3_cmd.c | 18 +-
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 ++--
drivers/net/hns3/hns3_ethdev.h | 32 ++--
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++----
drivers/net/hns3/hns3_intr.c | 36 ++--
drivers/net/hns3/hns3_intr.h | 4 +-
drivers/net/hns3/hns3_mbx.c | 6 +-
drivers/net/hns3/hns3_mp.c | 6 +-
drivers/net/hns3/hns3_rxtx.c | 10 +-
drivers/net/hns3/hns3_tm.c | 4 +-
drivers/net/i40e/i40e_ethdev.c | 4 +-
drivers/net/i40e/i40e_rxtx.c | 6 +-
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf.h | 16 +-
drivers/net/iavf/iavf_rxtx.c | 4 +-
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +-
drivers/net/ice/base/ice_osdep.h | 4 +-
drivers/net/ice/ice_dcf.c | 6 +-
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 +-
drivers/net/ice/ice_dcf_parent.c | 16 +-
drivers/net/ice/ice_ethdev.c | 12 +-
drivers/net/ice/ice_ethdev.h | 2 +-
drivers/net/idpf/idpf_ethdev.c | 7 +-
drivers/net/ixgbe/ixgbe_ethdev.c | 14 +-
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 +-
drivers/net/memif/memif.h | 4 +-
drivers/net/memif/rte_eth_memif.c | 50 +++---
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 +-
drivers/net/mlx5/mlx5.c | 9 +-
drivers/net/mlx5/mlx5.h | 66 ++++----
drivers/net/mlx5/mlx5_flow.c | 37 +++--
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 +++--
drivers/net/mlx5/mlx5_flow_dv.c | 126 +++++++-------
drivers/net/mlx5/mlx5_flow_flex.c | 14 +-
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++----
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++--
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++--
drivers/net/mlx5/mlx5_hws_cnt.c | 71 ++++----
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +-
drivers/net/mlx5/mlx5_rx.h | 14 +-
drivers/net/mlx5/mlx5_rxq.c | 30 ++--
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 +-
drivers/net/mlx5/mlx5_txpp.c | 84 +++++-----
drivers/net/mlx5/mlx5_txq.c | 12 +-
drivers/net/mlx5/mlx5_utils.c | 10 +-
drivers/net/mlx5/mlx5_utils.h | 4 +-
drivers/net/null/rte_eth_null.c | 12 +-
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +-
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +-
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx2_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx_ep_common.h | 4 +-
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 +-
drivers/net/octeontx/octeontx_ethdev.c | 8 +-
drivers/net/qede/base/bcm_osal.c | 6 +-
drivers/net/ring/rte_eth_ring.c | 8 +-
drivers/net/thunderx/nicvf_rxtx.c | 9 +-
drivers/net/thunderx/nicvf_struct.h | 4 +-
drivers/net/txgbe/txgbe_ethdev.c | 12 +-
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
drivers/net/virtio/virtio_ring.h | 4 +-
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 +-
drivers/net/virtio/virtqueue.h | 32 ++--
drivers/raw/ifpga/ifpga_rawdev.c | 9 +-
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +--
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +-
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++---
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 +-
examples/bbdev_app/main.c | 13 +-
examples/l2fwd-event/l2fwd_common.h | 4 +-
examples/l2fwd-event/l2fwd_event.c | 24 +--
examples/l2fwd-jobstats/main.c | 11 +-
.../client_server_mp/mp_server/main.c | 6 +-
examples/server_node_efd/efd_server/main.c | 6 +-
examples/vhost/main.c | 32 ++--
examples/vhost/main.h | 4 +-
examples/vhost/virtio_net.c | 13 +-
examples/vhost_blk/vhost_blk.c | 8 +-
examples/vm_power_manager/channel_manager.h | 4 +-
examples/vm_power_manager/channel_monitor.c | 9 +-
examples/vm_power_manager/vm_power_cli.c | 3 +-
182 files changed, 1646 insertions(+), 1502 deletions(-)
--
1.8.3.1
* [PATCH v5 01/45] net/mlx5: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 02/45] net/ixgbe: " Tyler Retzlaff
` (44 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
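For readers skimming the diff below, a hedged sketch of how the
compare-exchange calls translate (the 'flag' variable is invented for
illustration): the separate 'weak' boolean argument of
__atomic_compare_exchange_n disappears because the strong variant is selected
by name in the stdatomic-style API, and the object must be declared with
RTE_ATOMIC().

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    static RTE_ATOMIC(uint32_t) flag;    /* illustration only */

    static inline bool
    flag_claim(void)
    {
        uint32_t expected = 0;

        /*
         * was: __atomic_compare_exchange_n(&flag, &expected, 1, false,
         *                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED)
         */
        return rte_atomic_compare_exchange_strong_explicit(&flag, &expected, 1,
                rte_memory_order_relaxed, rte_memory_order_relaxed);
    }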
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 ++-
drivers/net/mlx5/mlx5.c | 9 ++-
drivers/net/mlx5/mlx5.h | 66 ++++++++---------
drivers/net/mlx5/mlx5_flow.c | 37 +++++-----
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 ++++++-----
drivers/net/mlx5/mlx5_flow_dv.c | 126 ++++++++++++++++----------------
drivers/net/mlx5/mlx5_flow_flex.c | 14 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++++++++-------
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++++----
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++++----
drivers/net/mlx5/mlx5_hws_cnt.c | 71 +++++++++---------
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +--
drivers/net/mlx5/mlx5_rx.h | 14 ++--
drivers/net/mlx5/mlx5_rxq.c | 30 ++++----
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 ++---
drivers/net/mlx5/mlx5_txpp.c | 84 ++++++++++-----------
drivers/net/mlx5/mlx5_txq.c | 12 +--
drivers/net/mlx5/mlx5_utils.c | 10 +--
drivers/net/mlx5/mlx5_utils.h | 4 +-
22 files changed, 351 insertions(+), 345 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 40ea9d2..70bba6c 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -1918,9 +1918,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
return -ENOTSUP;
}
/* Check there is no concurrent mapping in other thread. */
- if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
- base, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
+ base,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
return 0;
}
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index b54f3cc..63da8f4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1117,7 +1117,7 @@
return 0;
}
/* Only need to check refcnt, 0 after "sh" is allocated. */
- if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+ if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
priv->lb_used = 1;
return 0;
@@ -1163,7 +1163,7 @@
claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
sh->self_lb.ibv_cq = NULL;
}
- __atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
return -rte_errno;
#else
RTE_SET_USED(dev);
@@ -1186,8 +1186,9 @@
if (!priv->lb_used)
return;
- MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
- if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
+ MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
+ if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
if (sh->self_lb.qp) {
claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
sh->self_lb.qp = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d1a6382..2ff94db 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -855,8 +855,8 @@
ct_pool = mng->pools[idx];
for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
ct = &ct_pool->actions[i];
- val = __atomic_fetch_sub(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
MLX5_ASSERT(val == 1);
if (val > 1)
cnt++;
@@ -1082,7 +1082,8 @@
DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
return -ENOTSUP;
}
- if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
+ if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
+ rte_memory_order_relaxed) + 1 > 1)
return 0;
priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
@@ -1173,7 +1174,7 @@
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
- if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
return;
mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
mlx5_free(fp->flex.devx_fp);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3646d20..9e4a5fe 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -378,7 +378,7 @@ struct mlx5_drop {
struct mlx5_lb_ctx {
struct ibv_qp *qp; /* QP object. */
void *ibv_cq; /* Completion queue. */
- uint16_t refcnt; /* Reference count for representors. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
/* HW steering queue job descriptor type. */
@@ -481,10 +481,10 @@ enum mlx5_counter_type {
/* Counter age parameter. */
struct mlx5_age_param {
- uint16_t state; /**< Age state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
uint16_t port_id; /**< Port id of the counter. */
uint32_t timeout:24; /**< Aging timeout in seconds. */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/**< Time in seconds since last hit (atomically accessed). */
void *context; /**< Flow counter age context. */
};
@@ -497,7 +497,7 @@ struct flow_counter_stats {
/* Shared counters information for counters. */
struct mlx5_flow_counter_shared {
union {
- uint32_t refcnt; /* Only for shared action management. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
uint32_t id; /* User counter ID for legacy sharing. */
};
};
@@ -588,7 +588,7 @@ struct mlx5_counter_stats_raw {
/* Counter global management structure. */
struct mlx5_flow_counter_mng {
- volatile uint16_t n_valid; /* Number of valid pools. */
+ volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
uint16_t last_pool_idx; /* Last used pool index */
int min_id; /* The minimum counter ID in the pools. */
int max_id; /* The maximum counter ID in the pools. */
@@ -654,7 +654,7 @@ struct mlx5_aso_sq {
struct mlx5_aso_age_action {
LIST_ENTRY(mlx5_aso_age_action) next;
void *dr_action;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
/* Following fields relevant only when action is active. */
uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
struct mlx5_age_param age_params;
@@ -688,7 +688,7 @@ struct mlx5_geneve_tlv_option_resource {
rte_be16_t option_class; /* geneve tlv opt class.*/
uint8_t option_type; /* geneve tlv opt type.*/
uint8_t length; /* geneve tlv opt length. */
- uint32_t refcnt; /* geneve tlv object reference counter */
+ RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
};
@@ -903,7 +903,7 @@ struct mlx5_flow_meter_policy {
uint16_t group;
/* The group. */
rte_spinlock_t sl;
- uint32_t ref_cnt;
+ RTE_ATOMIC(uint32_t) ref_cnt;
/* Use count. */
struct rte_flow_pattern_template *hws_item_templ;
/* Hardware steering item templates. */
@@ -1038,7 +1038,7 @@ struct mlx5_flow_meter_profile {
struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
/**< srtcm_rfc2697 struct. */
};
- uint32_t ref_cnt; /**< Use count. */
+ RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
uint32_t g_support:1; /**< If G color will be generated. */
uint32_t y_support:1; /**< If Y color will be generated. */
uint32_t initialized:1; /**< Initialized. */
@@ -1078,7 +1078,7 @@ struct mlx5_aso_mtr {
enum mlx5_aso_mtr_type type;
struct mlx5_flow_meter_info fm;
/**< Pointer to the next aso flow meter structure. */
- uint8_t state; /**< ASO flow meter state. */
+ RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
uint32_t offset;
enum rte_color init_color;
};
@@ -1124,7 +1124,7 @@ struct mlx5_flow_mtr_mng {
/* Default policy table. */
uint32_t def_policy_id;
/* Default policy id. */
- uint32_t def_policy_ref_cnt;
+ RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
/** def_policy meter use count. */
struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
/* Meter drop table. */
@@ -1197,8 +1197,8 @@ struct mlx5_txpp_wq {
/* Tx packet pacing internal timestamp. */
struct mlx5_txpp_ts {
- uint64_t ci_ts;
- uint64_t ts;
+ RTE_ATOMIC(uint64_t) ci_ts;
+ RTE_ATOMIC(uint64_t) ts;
};
/* Tx packet pacing structure. */
@@ -1221,12 +1221,12 @@ struct mlx5_dev_txpp {
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
/* Statistics counters. */
- uint64_t err_miss_int; /* Missed service interrupt. */
- uint64_t err_rearm_queue; /* Rearm Queue errors. */
- uint64_t err_clock_queue; /* Clock Queue errors. */
- uint64_t err_ts_past; /* Timestamp in the past. */
- uint64_t err_ts_future; /* Timestamp in the distant future. */
- uint64_t err_ts_order; /* Timestamp not in ascending order. */
+ RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
+ RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
+ RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
+ RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
+ RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
+ RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
};
/* Sample ID information of eCPRI flex parser structure. */
@@ -1287,16 +1287,16 @@ struct mlx5_aso_ct_action {
void *dr_action_orig;
/* General action object for reply dir. */
void *dr_action_rply;
- uint32_t refcnt; /* Action used count in device flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */
uint16_t peer; /* The only peer port index could also use this CT. */
- enum mlx5_aso_ct_state state; /* ASO CT state. */
+ RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
bool is_original; /* The direction of the DR action to be used. */
};
/* CT action object state update. */
#define MLX5_ASO_CT_UPDATE_STATE(c, s) \
- __atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+ rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -1370,7 +1370,7 @@ struct mlx5_flex_pattern_field {
/* Port flex item context. */
struct mlx5_flex_item {
struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
- uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
uint32_t mapnum; /* Number of pattern translation entries. */
struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
@@ -1383,7 +1383,7 @@ struct mlx5_flex_item {
#define MLX5_SRV6_SAMPLE_NUM 5
/* Mlx5 internal flex parser profile structure. */
struct mlx5_internal_flex_parser_profile {
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
struct mlx5_flex_item flex; /* Hold map info for modify field. */
};
@@ -1512,9 +1512,9 @@ struct mlx5_dev_ctx_shared {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
#endif
- struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
- struct mlx5_hlist *modify_cmds;
- struct mlx5_hlist *tag_table;
+ RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
+ RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
+ RTE_ATOMIC(struct mlx5_hlist *) tag_table;
struct mlx5_list *port_id_action_list; /* Port ID action list. */
struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
struct mlx5_list *sample_action_list; /* List of sample actions. */
@@ -1525,7 +1525,7 @@ struct mlx5_dev_ctx_shared {
/* SW steering counters management structure. */
void *default_miss_action; /* Default miss action. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
- struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
+ RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
/* Shared interrupt handler section. */
struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
@@ -1570,7 +1570,7 @@ struct mlx5_dev_ctx_shared {
* Caution, secondary process may rebuild the struct during port start.
*/
struct mlx5_proc_priv {
- void *hca_bar;
+ RTE_ATOMIC(void *) hca_bar;
/* Mapped HCA PCI BAR area. */
size_t uar_table_sz;
/* Size of UAR register table. */
@@ -1635,7 +1635,7 @@ struct mlx5_rxq_obj {
/* Indirection table. */
struct mlx5_ind_table_obj {
LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
union {
void *ind_table; /**< Indirection table. */
struct mlx5_devx_obj *rqt; /* DevX RQT object. */
@@ -1826,7 +1826,7 @@ enum mlx5_quota_state {
};
struct mlx5_quota {
- uint8_t state; /* object state */
+ RTE_ATOMIC(uint8_t) state; /* object state */
uint8_t mode; /* metering mode */
/**
* Keep track of application update types.
@@ -1955,7 +1955,7 @@ struct mlx5_priv {
uint32_t flex_item_map; /* Map of allocated flex item elements. */
uint32_t nb_queue; /* HW steering queue number. */
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
- uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
+ RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -2007,7 +2007,7 @@ struct mlx5_priv {
#endif
struct rte_eth_dev *shared_host; /* Host device for HW steering. */
- uint16_t shared_refcnt; /* HW steering host reference counter. */
+ RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f31fdfb..1954975 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4623,8 +4623,8 @@ struct mlx5_translated_action_handle {
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
- __atomic_fetch_add(&shared_rss->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
+ rte_memory_order_relaxed);
return idx;
default:
break;
@@ -7459,7 +7459,7 @@ struct mlx5_list_entry *
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
- __atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
mlx5_free(default_miss_ctx.queue);
}
mlx5_flow_pop_thread_workspace();
@@ -7470,10 +7470,10 @@ struct mlx5_list_entry *
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
if (rss_desc->shared_rss)
- __atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
+ rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
+ rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
@@ -7976,7 +7976,8 @@ struct rte_flow *
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
+ rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
flow_mreg_del_copy_action(dev, flow);
@@ -9456,7 +9457,7 @@ struct mlx5_flow_workspace*
{
uint32_t pools_n, us;
- pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
+ pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -9558,17 +9559,17 @@ struct mlx5_flow_workspace*
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
age_param = MLX5_CNT_TO_AGE(cnt);
- if (__atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) != AGE_CANDIDATE)
+ if (rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) != AGE_CANDIDATE)
continue;
if (cur->data[i].hits != prev->data[i].hits) {
- __atomic_store_n(&age_param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
continue;
}
- if (__atomic_fetch_add(&age_param->sec_since_last_hit,
+ if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
time_delta,
- __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
+ rte_memory_order_relaxed) + time_delta <= age_param->timeout)
continue;
/**
* Hold the lock first, or if between the
@@ -9579,10 +9580,10 @@ struct mlx5_flow_workspace*
priv = rte_eth_devices[age_param->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_TMOUT, false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_TMOUT,
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
}
@@ -11407,7 +11408,7 @@ struct tunnel_db_element_release_ctx {
{
struct tunnel_db_element_release_ctx *ctx = x;
ctx->ret = 0;
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index cc1e8cf..9256aec 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1049,7 +1049,7 @@ struct mlx5_flow_tunnel {
LIST_ENTRY(mlx5_flow_tunnel) chain;
struct rte_flow_tunnel app_tunnel; /** app tunnel copy */
uint32_t tunnel_id; /** unique tunnel ID */
- uint32_t refctn;
+ RTE_ATOMIC(uint32_t) refctn;
struct rte_flow_action action;
struct rte_flow_item item;
struct mlx5_hlist *groups; /** tunnel groups */
@@ -1470,7 +1470,7 @@ struct rte_flow_pattern_template {
struct mlx5dr_match_template *mt; /* mlx5 match template. */
uint64_t item_flags; /* Item layer flags. */
uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
/*
* If true, then rule pattern should be prepended with
* represented_port pattern item.
@@ -1502,7 +1502,7 @@ struct rte_flow_actions_template {
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint16_t recom_off; /* Offset of DR IPv6 routing push remove action. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
uint8_t flex_item; /* flex item index. */
};
@@ -1855,7 +1855,7 @@ struct rte_flow_template_table {
/* Shared RSS action structure */
struct mlx5_shared_action_rss {
ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
- uint32_t refcnt; /**< Atomically accessed refcnt. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
struct mlx5_ind_table_obj *ind_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index ab9eb21..a94b228 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -619,7 +619,7 @@
uint8_t *u8addr;
uint8_t hit;
- if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
AGE_CANDIDATE)
continue;
byte = 63 - (j / 8);
@@ -627,13 +627,13 @@
u8addr = (uint8_t *)addr;
hit = (u8addr[byte] >> offset) & 0x1;
if (hit) {
- __atomic_store_n(&ap->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
} else {
struct mlx5_priv *priv;
- __atomic_fetch_add(&ap->sec_since_last_hit,
- diff, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
+ diff, rte_memory_order_relaxed);
/* If timeout passed add to aged-out list. */
if (ap->sec_since_last_hit <= ap->timeout)
continue;
@@ -641,12 +641,11 @@
rte_eth_devices[ap->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&ap->state,
+ if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
&expected,
AGE_TMOUT,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
LIST_INSERT_HEAD(&age_info->aged_aso,
act, next);
MLX5_AGE_SET(age_info,
@@ -946,10 +945,10 @@
for (i = 0; i < n; ++i) {
aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
MLX5_ASSERT(aso_mtr);
- verdict = __atomic_compare_exchange_n(&aso_mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
}
sq->tail += n;
@@ -1005,10 +1004,10 @@
mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,
MLX5_INDIRECT_ACTION_IDX_GET(job->action));
MLX5_ASSERT(mtr);
- verdict = __atomic_compare_exchange_n(&mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
}
@@ -1103,7 +1102,7 @@
struct mlx5_aso_sq *sq;
struct mlx5_dev_ctx_shared *sh = priv->sh;
uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
- uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+ uint8_t state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
poll_cq_t poll_mtr_cq =
is_tmpl_api ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
@@ -1112,7 +1111,7 @@
sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);
do {
poll_mtr_cq(priv, sq);
- if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
ASO_METER_READY)
return 0;
/* Waiting for CQE ready. */
@@ -1411,7 +1410,7 @@
uint16_t wqe_idx;
struct mlx5_aso_ct_pool *pool;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_FREE) {
DRV_LOG(ERR, "Fail: No context to query");
@@ -1620,12 +1619,12 @@
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
else
sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
/* Waiting for CQE ready, consider should block or sleep. */
@@ -1791,7 +1790,7 @@
bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (sh->config.dv_flow_en == 2)
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
@@ -1807,7 +1806,7 @@
}
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_READY ||
state == ASO_CONNTRACK_QUERY)
return 0;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d434c67..f9c56af 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -313,7 +313,7 @@ enum mlx5_l3_tunnel_detection {
}
static inline struct mlx5_hlist *
-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
const char *name, uint32_t size, bool direct_key,
bool lcores_share, void *ctx,
mlx5_list_create_cb cb_create,
@@ -327,7 +327,7 @@ enum mlx5_l3_tunnel_detection {
struct mlx5_hlist *expected = NULL;
char s[MLX5_NAME_SIZE];
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
if (likely(hl))
return hl;
snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
@@ -341,11 +341,11 @@ enum mlx5_l3_tunnel_detection {
"cannot allocate resource memory");
return NULL;
}
- if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_hlist_destroy(hl);
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
}
return hl;
}
@@ -6139,8 +6139,8 @@ struct mlx5_list_entry *
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
- struct mlx5_indexed_pool *ipool = __atomic_load_n
- (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+ struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
+ (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
if (!ipool) {
struct mlx5_indexed_pool *expected = NULL;
@@ -6165,13 +6165,13 @@ struct mlx5_list_entry *
ipool = mlx5_ipool_create(&cfg);
if (!ipool)
return NULL;
- if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
- &expected, ipool, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
+ &expected, ipool,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_ipool_destroy(ipool);
- ipool = __atomic_load_n(&sh->mdh_ipools[index],
- __ATOMIC_SEQ_CST);
+ ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
+ rte_memory_order_seq_cst);
}
}
return ipool;
@@ -6992,9 +6992,9 @@ struct mlx5_list_entry *
age_info = GET_PORT_AGE_INFO(priv);
age_param = flow_dv_counter_idx_get_age(dev, counter);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since counter may still in process.
@@ -7002,7 +7002,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -7038,8 +7038,8 @@ struct mlx5_list_entry *
* indirect action API, shared info is 1 before the reduction,
* so this condition is failed and function doesn't return here.
*/
- if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
- __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
+ rte_memory_order_relaxed) - 1)
return;
}
cnt->pool = pool;
@@ -10203,8 +10203,8 @@ struct mlx5_list_entry *
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
} else {
ret = rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -10243,8 +10243,8 @@ struct mlx5_list_entry *
geneve_opt_resource->option_class = geneve_opt_v->option_class;
geneve_opt_resource->option_type = geneve_opt_v->option_type;
geneve_opt_resource->length = geneve_opt_v->option_len;
- __atomic_store_n(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
}
exit:
rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
@@ -12192,8 +12192,8 @@ struct mlx5_list_entry *
(void *)(uintptr_t)(dev_flow->flow_idx);
age_param->timeout = age->timeout;
age_param->port_id = dev->data->port_id;
- __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
return counter;
}
@@ -13241,9 +13241,9 @@ struct mlx5_list_entry *
uint16_t expected = AGE_CANDIDATE;
age_info = GET_PORT_AGE_INFO(priv);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since age action may still in process.
@@ -13251,7 +13251,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
LIST_REMOVE(age, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -13275,7 +13275,7 @@ struct mlx5_list_entry *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
- uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
flow_dv_aso_age_remove_from_age(dev, age);
@@ -13451,7 +13451,7 @@ struct mlx5_list_entry *
return 0; /* 0 is an error. */
}
}
- __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
return pool->index | ((age_free->offset + 1) << 16);
}
@@ -13481,10 +13481,10 @@ struct mlx5_list_entry *
aso_age->age_params.context = context;
aso_age->age_params.timeout = timeout;
aso_age->age_params.port_id = dev->data->port_id;
- __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
- __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
static void
@@ -13666,12 +13666,12 @@ struct mlx5_list_entry *
uint32_t ret;
struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
/* Cannot release when CT is in the ASO SQ. */
if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
return -1;
- ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -13861,7 +13861,7 @@ struct mlx5_list_entry *
pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
/* 0: inactive, 1: created, 2+: used by flows. */
- __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -14813,8 +14813,8 @@ struct mlx5_list_entry *
age_act = flow_aso_age_get_by_idx(dev, owner_idx);
if (flow->age == 0) {
flow->age = owner_idx;
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
+ rte_memory_order_relaxed);
}
age_act_pos = actions_n++;
action_flags |= MLX5_FLOW_ACTION_AGE;
@@ -14851,9 +14851,9 @@ struct mlx5_list_entry *
} else {
if (flow->counter == 0) {
flow->counter = owner_idx;
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&cnt_act->shared_info.refcnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
}
/* Save information first, will apply later. */
action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -15185,8 +15185,8 @@ struct mlx5_list_entry *
flow->indirect_type =
MLX5_INDIRECT_ACTION_TYPE_CT;
flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
}
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
@@ -15855,7 +15855,7 @@ struct mlx5_list_entry *
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
- __atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
}
void
@@ -16038,8 +16038,8 @@ struct mlx5_list_entry *
sh->geneve_tlv_option_resource;
rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
if (geneve_opt_resource) {
- if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED) - 1)) {
+ if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
claim_zero(mlx5_devx_cmd_destroy
(geneve_opt_resource->obj));
mlx5_free(sh->geneve_tlv_option_resource);
@@ -16448,7 +16448,7 @@ struct mlx5_list_entry *
/* Update queue with indirect table queue memoyr. */
origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
- __atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
@@ -16494,9 +16494,9 @@ struct mlx5_list_entry *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"invalid shared action");
- if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
- 0, 0, __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
+ 0, rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -16632,10 +16632,10 @@ struct rte_flow_action_handle *
return __flow_dv_action_rss_release(dev, idx, error);
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
- if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
- &no_flow_refcnt, 1, false,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
+ &no_flow_refcnt, 1,
+ rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -17595,13 +17595,13 @@ struct rte_flow_action_handle *
case MLX5_INDIRECT_ACTION_TYPE_AGE:
age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
resp = data;
- resp->aged = __atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) == AGE_TMOUT ?
+ resp->aged = rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) == AGE_TMOUT ?
1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_dv_query_count(dev, idx, data, error);
@@ -17678,12 +17678,12 @@ struct rte_flow_action_handle *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
}
- resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
AGE_TMOUT ? 1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 4ae03a2..8a02247 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -86,7 +86,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map |= 1u << idx;
}
}
@@ -107,7 +107,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map &= ~(1u << idx);
rte_spinlock_unlock(&priv->flex_item_sl);
}
@@ -379,7 +379,7 @@
return ret;
}
if (acquire)
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return ret;
}
@@ -414,7 +414,7 @@
rte_errno = -EINVAL;
return -EINVAL;
}
- __atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
return 0;
}
@@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *
}
flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
/* Mark initialized flex item valid. */
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return (struct rte_flow_item_flex_handle *)flex;
error:
@@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
"invalid flex item handle value");
}
- if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
+ rte_memory_order_acquire, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&priv->flex_item_sl);
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9ebbe66..8891f3c 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -715,7 +715,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
}
if (acts->mark)
- if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed) - 1))
flow_hw_rxq_flag_set(dev, false);
if (acts->jump) {
@@ -2298,7 +2299,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
goto err;
acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
- __atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
@@ -4537,8 +4539,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
uint8_t i;
for (i = 0; i < nb_action_templates; i++) {
- uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED);
+ uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (refcnt <= 1) {
rte_flow_error_set(error, EINVAL,
@@ -4576,8 +4578,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
while (i--) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_sub_fetch(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
return rte_errno;
}
@@ -4748,8 +4750,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
- ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED) + 1;
+ ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (ret <= 1) {
rte_errno = EINVAL;
goto it_error;
@@ -4800,14 +4802,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
for (i = 0; i < nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_fetch_sub(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
i = nb_item_templates;
it_error:
while (i--)
- __atomic_fetch_sub(&item_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
error:
err = rte_errno;
if (tbl) {
@@ -5039,12 +5041,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
LIST_REMOVE(table, next);
for (i = 0; i < table->nb_item_templates; i++)
- __atomic_fetch_sub(&table->its[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
+ 1, rte_memory_order_relaxed);
for (i = 0; i < table->nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
- __atomic_fetch_sub(&table->ats[i].action_template->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
+ 1, rte_memory_order_relaxed);
}
flow_hw_destroy_table_multi_pattern_ctx(table);
if (table->matcher_info[0].matcher)
@@ -7287,7 +7289,7 @@ enum mlx5_hw_indirect_list_relative_position {
if (!at->tmpl)
goto error;
at->action_flags = action_flags;
- __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
return at;
error:
@@ -7323,7 +7325,7 @@ enum mlx5_hw_indirect_list_relative_position {
uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Action template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -7897,7 +7899,7 @@ enum mlx5_hw_indirect_list_relative_position {
break;
}
}
- __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
rte_errno = pattern_template_validate(dev, &it, 1);
if (rte_errno)
goto error;
@@ -7933,7 +7935,7 @@ enum mlx5_hw_indirect_list_relative_position {
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Item template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -10513,7 +10515,8 @@ struct mlx5_list_entry *
}
dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
priv->shared_host = host_dev;
- __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
}
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
@@ -10698,7 +10701,8 @@ struct mlx5_list_entry *
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
if (priv->hw_q) {
@@ -10814,7 +10818,8 @@ struct mlx5_list_entry *
priv->hw_q = NULL;
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
mlx5_free(priv->hw_attr);
@@ -10872,8 +10877,8 @@ struct mlx5_list_entry *
NULL,
"Invalid CT destruction index");
}
- __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
+ rte_memory_order_relaxed);
mlx5_ipool_free(pool->cts, idx);
return 0;
}
@@ -11572,7 +11577,7 @@ struct mlx5_hw_q_job *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_REPORTED:
case HWS_AGE_AGED_OUT_NOT_REPORTED:
resp->aged = 1;
@@ -11592,8 +11597,8 @@ struct mlx5_hw_q_job *
}
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index ca361f7..da3289b 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -2055,9 +2055,9 @@ struct mlx5_flow_meter_policy *
NULL, "Meter profile id not valid.");
/* Meter policy must exist. */
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
if (!priv->sh->config.dv_esw_en)
domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -2137,7 +2137,7 @@ struct mlx5_flow_meter_policy *
fm->is_enable = params->meter_enable;
fm->shared = !!shared;
fm->color_aware = !!params->use_prev_mtr_color;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
fm->def_policy = 1;
fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
@@ -2166,7 +2166,7 @@ struct mlx5_flow_meter_policy *
}
fm->active_state = params->meter_enable;
if (mtr_policy)
- __atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
error:
mlx5_flow_destroy_mtr_tbls(dev, fm);
@@ -2271,8 +2271,8 @@ struct mlx5_flow_meter_policy *
NULL, "Failed to create devx meter.");
}
fm->active_state = params->meter_enable;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
- __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
}
#endif
@@ -2295,7 +2295,7 @@ struct mlx5_flow_meter_policy *
if (fmp == NULL)
return -1;
/* Update dependencies. */
- __atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
fm->profile = NULL;
/* Remove from list. */
if (!priv->sh->meter_aso_en) {
@@ -2313,15 +2313,15 @@ struct mlx5_flow_meter_policy *
}
mlx5_flow_destroy_mtr_tbls(dev, fm);
if (fm->def_policy)
- __atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
+ 1, rte_memory_order_relaxed);
if (priv->sh->meter_aso_en) {
if (!fm->def_policy) {
mtr_policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
if (mtr_policy)
- __atomic_fetch_sub(&mtr_policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
fm->policy_id = 0;
}
fm->def_policy = 0;
@@ -2424,13 +2424,13 @@ struct mlx5_flow_meter_policy *
RTE_MTR_ERROR_TYPE_UNSPECIFIED,
NULL, "Meter object is being used.");
/* Destroy the meter profile. */
- __atomic_fetch_sub(&fm->profile->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
+ 1, rte_memory_order_relaxed);
/* Destroy the meter policy. */
policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
- __atomic_fetch_sub(&policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
index 14a2a8b..6ad0e8a 100644
--- a/drivers/net/mlx5/mlx5_flow_quota.c
+++ b/drivers/net/mlx5/mlx5_flow_quota.c
@@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
struct mlx5_quota *quota_obj =
sq->elts[(sq->tail + i) & mask].quota_obj;
- __atomic_compare_exchange_n("a_obj->state, &state,
- MLX5_QUOTA_STATE_READY, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit("a_obj->state, &state,
+ MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
}
}
@@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
rte_spinlock_lock(&sq->sqsl);
mlx5_quota_cmd_completion_handle(sq);
rte_spinlock_unlock(&sq->sqsl);
- if (__atomic_load_n("a_obj->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit("a_obj->state, rte_memory_order_relaxed) ==
MLX5_QUOTA_STATE_READY)
return 0;
} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
{
uint8_t state = MLX5_QUOTA_STATE_READY;
- bool verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ bool verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict)
return rte_flow_error_set(error, EBUSY,
@@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
async_job ? async_job : &sync_job, push, NULL);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
async_job ? async_job : &sync_job, push,
(void *)(uintptr_t)update->conf);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -593,9 +593,9 @@ struct rte_flow_action_handle *
NULL, "quota: failed to allocate quota object");
return NULL;
}
- verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: new quota object has invalid state");
@@ -616,8 +616,8 @@ struct rte_flow_action_handle *
(void *)(uintptr_t)conf);
if (ret) {
mlx5_ipool_free(qctx->quota_ipool, id);
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
+ rte_memory_order_relaxed);
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: WR failure");
return 0;
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index c31f2f3..1b625e0 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -149,7 +149,7 @@
}
if (param->timeout == 0)
continue;
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_NOT_REPORTED:
case HWS_AGE_AGED_OUT_REPORTED:
/* Already aged-out, no action is needed. */
@@ -171,8 +171,8 @@
hits = rte_be_to_cpu_64(stats[i].hits);
if (param->nb_cnts == 1) {
if (hits != param->accumulator_last_hits) {
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
param->accumulator_last_hits = hits;
continue;
}
@@ -184,8 +184,8 @@
param->accumulator_cnt = 0;
if (param->accumulator_last_hits !=
param->accumulator_hits) {
- __atomic_store_n(&param->sec_since_last_hit,
- 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit,
+ 0, rte_memory_order_relaxed);
param->accumulator_last_hits =
param->accumulator_hits;
param->accumulator_hits = 0;
@@ -193,9 +193,9 @@
}
param->accumulator_hits = 0;
}
- if (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,
- __ATOMIC_RELAXED) + time_delta <=
- __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
+ if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
+ rte_memory_order_relaxed) + time_delta <=
+ rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
continue;
/* Prepare the relevant ring for this AGE parameter */
if (priv->hws_strict_queue)
@@ -203,10 +203,10 @@
else
r = age_info->hw_age.aged_list;
/* Changing the state atomically and insert it into the ring. */
- if (__atomic_compare_exchange_n(&param->state, &expected1,
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
sizeof(uint32_t),
1, NULL);
@@ -221,11 +221,10 @@
*/
expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
if (ret == 0 &&
- !__atomic_compare_exchange_n(&param->state,
+ !rte_atomic_compare_exchange_strong_explicit(&param->state,
&expected2, expected1,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected2 == HWS_AGE_FREE)
mlx5_hws_age_param_free(priv,
param->own_cnt_index,
@@ -235,10 +234,10 @@
if (!priv->hws_strict_queue)
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
} else {
- __atomic_compare_exchange_n(&param->state, &expected2,
+ rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
}
}
/* The event is irrelevant in strict queue mode. */
@@ -796,8 +795,8 @@ struct mlx5_hws_cnt_pool *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
- switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
- __ATOMIC_RELAXED)) {
+ switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
+ rte_memory_order_relaxed)) {
case HWS_AGE_CANDIDATE:
case HWS_AGE_AGED_OUT_REPORTED:
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
@@ -862,8 +861,8 @@ struct mlx5_hws_cnt_pool *
"cannot allocate AGE parameter");
return 0;
}
- MLX5_ASSERT(__atomic_load_n(&param->state,
- __ATOMIC_RELAXED) == HWS_AGE_FREE);
+ MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
+ rte_memory_order_relaxed) == HWS_AGE_FREE);
if (shared) {
param->nb_cnts = 0;
param->accumulator_hits = 0;
@@ -914,9 +913,9 @@ struct mlx5_hws_cnt_pool *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
if (update_ade->timeout_valid) {
- uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
+ uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
update_ade->timeout,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (old_timeout == 0)
sec_since_last_hit_reset = true;
@@ -935,8 +934,8 @@ struct mlx5_hws_cnt_pool *
state_update = true;
}
if (sec_since_last_hit_reset)
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
if (state_update) {
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
@@ -945,13 +944,13 @@ struct mlx5_hws_cnt_pool *
* - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
* - AGED_OUT_REPORTED -> CANDIDATE
*/
- if (!__atomic_compare_exchange_n(&param->state, &expected,
+ if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
HWS_AGE_CANDIDATE_INSIDE_RING,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected == HWS_AGE_AGED_OUT_REPORTED)
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
return 0;
}
@@ -976,9 +975,9 @@ struct mlx5_hws_cnt_pool *
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
MLX5_ASSERT(param != NULL);
- if (__atomic_compare_exchange_n(&param->state, &expected,
- HWS_AGE_AGED_OUT_REPORTED, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
+ HWS_AGE_AGED_OUT_REPORTED,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
return param->context;
switch (expected) {
case HWS_AGE_FREE:
@@ -990,8 +989,8 @@ struct mlx5_hws_cnt_pool *
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
break;
case HWS_AGE_CANDIDATE_INSIDE_RING:
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
break;
case HWS_AGE_CANDIDATE:
/*
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index 1cb0564..db4e99e 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -101,7 +101,7 @@ struct __rte_cache_aligned mlx5_hws_cnt_pool {
LIST_ENTRY(mlx5_hws_cnt_pool) next;
alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_pool_cfg cfg;
alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_dcs_mng dcs_mng;
- alignas(RTE_CACHE_LINE_SIZE) uint32_t query_gen;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) query_gen;
struct mlx5_hws_cnt *pool;
struct mlx5_hws_cnt_raw_data_mng *raw_mng;
struct rte_ring *reuse_list;
@@ -134,10 +134,10 @@ enum {
/* HWS counter age parameter. */
struct __rte_cache_aligned mlx5_hws_age_param {
- uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/* Time in seconds since last hit (atomically accessed). */
- uint16_t state; /* AGE state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
uint64_t accumulator_last_hits;
/* Last total value of hits for comparing. */
uint64_t accumulator_hits;
@@ -426,7 +426,7 @@ struct __rte_cache_aligned mlx5_hws_age_param {
iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
hpool->pool[iidx].in_used = false;
hpool->pool[iidx].query_gen_when_free =
- __atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
qcache = hpool->cache->qcache[*queue];
if (unlikely(qcache == NULL)) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index fb4d8e6..d008e4d 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {
/* RX queue private data. */
struct mlx5_rxq_priv {
uint16_t idx; /* Queue index. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -188,7 +188,7 @@ struct mlx5_rxq_priv {
/* External RX queue descriptor. */
struct mlx5_external_rxq {
uint32_t hw_id; /* Queue index in the Hardware. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
/* mlx5_rxq.c */
@@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
void *addr;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
MLX5_ASSERT(rep != NULL);
/* Replace MPRQ buf. */
(*rxq->mprq_bufs)[rq_idx] = rep;
@@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
void *buf_addr;
/* Increment the refcnt of the whole chunk. */
- __atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
- MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
- __ATOMIC_RELAXED) <= strd_n + 1);
+ rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
+ MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
+ rte_memory_order_relaxed) <= strd_n + 1);
buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
/*
* MLX5 device doesn't use iova but it is necessary in a
@@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
return false;
rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
- return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+ return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
}
#define LWM_COOKIE_RXQID_OFFSET 0
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dd51687..f67aaa6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -416,7 +416,7 @@
rte_errno = EINVAL;
return -rte_errno;
}
- return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1319,7 +1319,7 @@
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
for (j = 0; j != strd_n; ++j) {
shinfo = &buf->shinfos[j];
shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2037,7 +2037,7 @@ struct mlx5_rxq_priv *
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
if (rxq != NULL)
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2059,7 +2059,7 @@ struct mlx5_rxq_priv *
if (rxq == NULL)
return 0;
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2138,7 +2138,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2158,7 +2158,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2447,8 +2447,8 @@ struct mlx5_ind_table_obj *
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
== 0)) {
- __atomic_fetch_add(&ind_tbl->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
+ rte_memory_order_relaxed);
break;
}
}
@@ -2479,7 +2479,7 @@ struct mlx5_ind_table_obj *
unsigned int ret;
rte_rwlock_write_lock(&priv->ind_tbls_lock);
- ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
@@ -2561,7 +2561,7 @@ struct mlx5_ind_table_obj *
}
return ret;
}
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
return 0;
}
@@ -2626,7 +2626,7 @@ struct mlx5_ind_table_obj *
{
uint32_t refcnt;
- refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+ refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
if (refcnt <= 1)
return 0;
/*
@@ -3258,8 +3258,8 @@ struct mlx5_hrxq *
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
if (ext_rxq == NULL)
return -rte_errno;
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
if (ext_rxq->hw_id != hw_idx) {
DRV_LOG(ERR, "Port %u external RxQ index %u "
"is already mapped to HW index (requesting is "
@@ -3296,8 +3296,8 @@ struct mlx5_hrxq *
rte_errno = EINVAL;
return -rte_errno;
}
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
port_id, dpdk_idx);
rte_errno = EINVAL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f8d6728..c241a1d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1441,7 +1441,7 @@
rte_delay_us_sleep(1000 * priv->rxqs_n);
DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
if (priv->sh->config.dv_flow_en == 2) {
- if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
flow_hw_rxq_flag_set(dev, false);
} else {
mlx5_flow_stop_default(dev);
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 107d7ab..0d77ff8 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -179,7 +179,7 @@ struct __rte_cache_aligned mlx5_txq_data {
__extension__
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
bool is_hairpin; /* Whether TxQ type is Hairpin. */
unsigned int max_inline_data; /* Max inline data. */
@@ -339,8 +339,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
* the service thread, data should be re-read.
*/
rte_compiler_barrier();
- ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
- ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
+ ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
+ ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
break;
@@ -350,8 +350,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts -= ts;
if (unlikely(mts >= UINT64_MAX / 2)) {
/* We have negative integer, mts is in the past. */
- __atomic_fetch_add(&sh->txpp.err_ts_past,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
+ 1, rte_memory_order_relaxed);
return -1;
}
tick = sh->txpp.tick;
@@ -360,8 +360,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts = (mts + tick - 1) / tick;
if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
/* We have mts is too distant future. */
- __atomic_fetch_add(&sh->txpp.err_ts_future,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
+ 1, rte_memory_order_relaxed);
return -1;
}
mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
@@ -1743,8 +1743,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
/* Convert the timestamp into completion to wait. */
ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
if (txq->ts_last && ts < txq->ts_last)
- __atomic_fetch_add(&txq->sh->txpp.err_ts_order,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
+ 1, rte_memory_order_relaxed);
txq->ts_last = ts;
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
sh = txq->sh;
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 5a5df2d..4e26fa2 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -538,12 +538,12 @@
uint64_t *ps;
rte_compiler_barrier();
- tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
- op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
+ tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
+ op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
rte_compiler_barrier();
- if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
+ if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
continue;
- if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
+ if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
continue;
ps = (uint64_t *)ts;
ps[0] = tm;
@@ -561,8 +561,8 @@
ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
rte_compiler_barrier();
- __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
rte_wmb();
}
@@ -590,8 +590,8 @@
*/
DRV_LOG(DEBUG,
"Clock Queue error sync lost (%X).", opcode);
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
return;
@@ -633,10 +633,10 @@
if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
return;
MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
- sh->txpp.ts.ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
- sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+ sh->txpp.ts.ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+ sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
sh->txpp.ts_p = 0;
if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -677,8 +677,8 @@
/* Check whether we have missed interrupts. */
if (cq_ci - wq->cq_ci != 1) {
DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
- __atomic_fetch_add(&sh->txpp.err_miss_int,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
+ 1, rte_memory_order_relaxed);
/* Check sync lost on wqe index. */
if (cq_ci - wq->cq_ci >=
(((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -693,8 +693,8 @@
/* Fire new requests to Rearm Queue. */
if (error) {
DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_rearm_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
}
@@ -987,8 +987,8 @@
mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
if (to.cts.op_own >> 4) {
DRV_LOG(DEBUG, "Clock Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
return -EIO;
}
@@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
return 0;
}
@@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
do {
uint64_t ts, ci;
- ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
- ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+ ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
+ ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ts,
- __ATOMIC_RELAXED) != ts)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
+ rte_memory_order_relaxed) != ts)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
- __ATOMIC_RELAXED) != ci)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
+ rte_memory_order_relaxed) != ci)
continue;
tsa->ts = ts;
tsa->ci_ts = ci;
@@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
for (i = 0; i < n_txpp; ++i)
stats[n_used + i].id = n_used + i;
stats[n_used + 0].value =
- __atomic_load_n(&sh->txpp.err_miss_int,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_miss_int,
+ rte_memory_order_relaxed);
stats[n_used + 1].value =
- __atomic_load_n(&sh->txpp.err_rearm_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
+ rte_memory_order_relaxed);
stats[n_used + 2].value =
- __atomic_load_n(&sh->txpp.err_clock_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
+ rte_memory_order_relaxed);
stats[n_used + 3].value =
- __atomic_load_n(&sh->txpp.err_ts_past,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_past,
+ rte_memory_order_relaxed);
stats[n_used + 4].value =
- __atomic_load_n(&sh->txpp.err_ts_future,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_future,
+ rte_memory_order_relaxed);
stats[n_used + 5].value =
- __atomic_load_n(&sh->txpp.err_ts_order,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_order,
+ rte_memory_order_relaxed);
stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
stats[n_used + 8].value = sh->txpp.sync_lost;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 14f55e8..da4236f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *
rte_errno = ENOMEM;
goto error;
}
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
tmpl->is_hairpin = false;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
@@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->is_hairpin = true;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
- __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
}
return ctrl;
}
@@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *
if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
return 0;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
+ if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
return 1;
if (txq_ctrl->obj) {
priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *
txq_free_elts(txq_ctrl);
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
- if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
if (!txq_ctrl->is_hairpin)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq_ctrl, next);
@@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *
if (!(*priv->txqs)[idx])
return -1;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
}
/**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index e28db2e..fc03cc0 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -203,7 +203,7 @@ struct mlx5_indexed_pool *
struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
lc = pool->cache[cidx]->lc;
- gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
+ gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
if (gc && lc != gc) {
mlx5_ipool_lock(pool);
if (lc && !(--lc->ref_cnt))
@@ -266,8 +266,8 @@ struct mlx5_indexed_pool *
pool->cache[cidx]->len = fetch_size - 1;
return pool->cache[cidx]->idx[pool->cache[cidx]->len];
}
- trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
- __ATOMIC_ACQUIRE) : 0;
+ trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
+ rte_memory_order_acquire) : 0;
trunk_n = lc ? lc->n_trunk : 0;
cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
/* Check if index reach maximum. */
@@ -332,11 +332,11 @@ struct mlx5_indexed_pool *
lc = p;
lc->ref_cnt = 1;
pool->cache[cidx]->lc = lc;
- __atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
}
/* Add trunk to trunks array. */
lc->trunks[trunk_idx] = trunk;
- __atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
/* Enqueue half of the index to global. */
ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
fetch_size = trunk->free >> 1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index b51d977..d86a809 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {
struct mlx5_indexed_cache {
struct mlx5_indexed_trunk **trunks;
- volatile uint32_t n_trunk_valid; /* Trunks allocated. */
+ volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
uint32_t n_trunk; /* Trunk pointer array size. */
uint32_t ref_cnt;
uint32_t len;
@@ -266,7 +266,7 @@ struct mlx5_indexed_pool {
uint32_t free_list; /* Index to first free trunk. */
};
struct {
- struct mlx5_indexed_cache *gc;
+ RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
/* Global cache. */
struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
/* Local cache. */
--
1.8.3.1
* [PATCH v5 02/45] net/ixgbe: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 01/45] net/mlx5: use rte " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 03/45] net/iavf: " Tyler Retzlaff
` (43 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
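For reference, a minimal standalone sketch of the flag handling this patch
converts, assuming the hypothetical names demo_flag, demo_try_start and
demo_stop (they are not ixgbe symbols). The gcc builtins
__atomic_test_and_set()/__atomic_clear() used on the link-thread flag map
to rte_atomic_exchange_explicit()/rte_atomic_store_explicit() on an
RTE_ATOMIC(bool), keeping the seq_cst ordering:
#include <stdbool.h>
#include <rte_stdatomic.h>
/* stand-in for the adapter's link_thread_running flag */
static RTE_ATOMIC(bool) demo_flag;
/* returns true only for the thread that found the flag clear */
static inline bool
demo_try_start(void)
{
	/* replaces __atomic_test_and_set(&flag, __ATOMIC_SEQ_CST) */
	return rte_atomic_exchange_explicit(&demo_flag, 1,
					    rte_memory_order_seq_cst) == 0;
}
static inline void
demo_stop(void)
{
	/* replaces __atomic_clear(&flag, __ATOMIC_SEQ_CST) */
	rte_atomic_store_explicit(&demo_flag, 0, rte_memory_order_seq_cst);
}
The "== 0" test mirrors the driver's
!rte_atomic_exchange_explicit(&ad->link_thread_running, 1, ...) check: only
the caller that observed the old value 0 goes on to create the link thread.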
drivers/net/ixgbe/ixgbe_ethdev.c | 14 ++++++++------
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 ++--
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c61c52b..e63ae1a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1130,7 +1130,7 @@ struct rte_ixgbe_xstats_name_off {
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbe_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1638,7 +1638,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbevf_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
@@ -4203,7 +4203,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
/* NOTE: review for potential ordering optimization */
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -4240,7 +4240,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -4336,7 +4336,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
ixgbe_dev_wait_setup_link_complete(dev, 0);
/* NOTE: review for potential ordering optimization */
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the IXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -4348,7 +4349,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR,
"Create link thread failed!");
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 22fc3be..8ad841e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -511,7 +511,7 @@ struct ixgbe_adapter {
*/
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
- bool link_thread_running;
+ RTE_ATOMIC(bool) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 3d39eaa..0d42fd8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1831,7 +1831,7 @@ const alignas(RTE_CACHE_LINE_SIZE) uint32_t
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
@@ -2114,7 +2114,7 @@ const alignas(RTE_CACHE_LINE_SIZE) uint32_t
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
--
1.8.3.1
* [PATCH v5 03/45] net/iavf: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 02/45] net/ixgbe: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 04/45] net/ice: " Tyler Retzlaff
` (42 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
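For reference, a minimal standalone sketch of the pending-command claim
done in _atomic_set_cmd(), assuming the hypothetical names demo_op,
demo_pending and demo_claim (they are not iavf symbols). The field becomes
a volatile RTE_ATOMIC(enum ...) and the claim is a strong compare-exchange
that succeeds only while no command is outstanding:
#include <stdbool.h>
#include <rte_stdatomic.h>
enum demo_op { DEMO_OP_NONE = 0, DEMO_OP_QUERY };
/* stand-in for iavf_info.pend_cmd */
static volatile RTE_ATOMIC(enum demo_op) demo_pending = DEMO_OP_NONE;
/* returns true when 'op' was installed, false if another op is pending */
static inline bool
demo_claim(enum demo_op op)
{
	enum demo_op none = DEMO_OP_NONE;
	/*
	 * Replaces __atomic_compare_exchange(): succeeds only if
	 * demo_pending still holds DEMO_OP_NONE; on failure 'none'
	 * is overwritten with the op that is currently pending.
	 */
	return rte_atomic_compare_exchange_strong_explicit(&demo_pending,
			&none, op, rte_memory_order_acquire,
			rte_memory_order_acquire);
}
Both the success and failure orderings stay acquire, matching the
__ATOMIC_ACQUIRE pair in the original builtin call.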
drivers/net/iavf/iavf.h | 16 ++++++++--------
drivers/net/iavf/iavf_rxtx.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +++++++-------
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 7ab41c9..ad526c6 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -238,8 +238,8 @@ struct iavf_info {
struct virtchnl_vlan_caps vlan_v2_caps;
uint64_t supported_rxdid;
uint8_t *proto_xtr; /* proto xtr type for all queues */
- volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
- uint32_t pend_cmd_count;
+ volatile RTE_ATOMIC(enum virtchnl_ops) pend_cmd; /* pending command not finished */
+ RTE_ATOMIC(uint32_t) pend_cmd_count;
int cmd_retval; /* return value of the cmd response from PF */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
@@ -456,13 +456,13 @@ struct iavf_cmd_info {
_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 1, rte_memory_order_relaxed);
return !ret;
}
@@ -472,13 +472,13 @@ struct iavf_cmd_info {
_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 2, rte_memory_order_relaxed);
return !ret;
}
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 59a0b9e..ecc3143 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2025,7 +2025,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
@@ -2152,7 +2152,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
index 83825aa..20b656e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_neon.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -273,7 +273,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1111d30..6d5969f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -41,7 +41,7 @@ struct iavf_event_element {
};
struct iavf_event_handler {
- uint32_t ndev;
+ RTE_ATOMIC(uint32_t) ndev;
rte_thread_t tid;
int fd[2];
pthread_mutex_t lock;
@@ -129,7 +129,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_add(&handler->ndev, 1, __ATOMIC_RELAXED) + 1 != 1)
+ if (rte_atomic_fetch_add_explicit(&handler->ndev, 1, rte_memory_order_relaxed) + 1 != 1)
return 0;
#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
@@ -137,7 +137,7 @@ struct iavf_event_handler {
int err = pipe(handler->fd);
#endif
if (err != 0) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -146,7 +146,7 @@ struct iavf_event_handler {
if (rte_thread_create_internal_control(&handler->tid, "iavf-event",
iavf_dev_event_handle, NULL)) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -158,7 +158,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed) - 1 != 0)
return;
int unused = pthread_cancel((pthread_t)handler->tid.opaque_id);
@@ -574,8 +574,8 @@ struct iavf_event_handler {
/* read message and it's expected one */
if (msg_opc == vf->pend_cmd) {
uint32_t cmd_count =
- __atomic_fetch_sub(&vf->pend_cmd_count,
- 1, __ATOMIC_RELAXED) - 1;
+ rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count,
+ 1, rte_memory_order_relaxed) - 1;
if (cmd_count == 0)
_notify_cmd(vf, msg_ret);
} else {
--
1.8.3.1
* [PATCH v5 04/45] net/ice: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (2 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 03/45] net/iavf: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 05/45] net/i40e: " Tyler Retzlaff
` (41 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
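As a reviewer aid, a minimal sketch (names are illustrative, not the driver's) of the relaxed load/store and fetch-add patterns this patch moves the DCF state flag and the DMA memzone-id counter to:
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>
static RTE_ATOMIC(bool) state_on;        /* illustrative state flag */
static RTE_ATOMIC(uint64_t) dma_zone_id; /* illustrative unique-name counter */
static inline void
state_set(bool on)
{
	/* was: __atomic_store_n(&state_on, on, __ATOMIC_RELAXED) */
	rte_atomic_store_explicit(&state_on, on, rte_memory_order_relaxed);
}
static inline bool
state_get(void)
{
	/* was: __atomic_load_n(&state_on, __ATOMIC_RELAXED) */
	return rte_atomic_load_explicit(&state_on, rte_memory_order_relaxed);
}
static inline void
next_zone_name(char *buf, size_t len)
{
	/* fetch-add returns the previous value, giving each caller a unique id */
	snprintf(buf, len, "dma_%" PRIu64,
		 rte_atomic_fetch_add_explicit(&dma_zone_id, 1,
					       rte_memory_order_relaxed));
}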
drivers/net/ice/base/ice_osdep.h | 4 ++--
drivers/net/ice/ice_dcf.c | 6 +++---
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 ++++----
drivers/net/ice/ice_dcf_parent.c | 16 ++++++++--------
drivers/net/ice/ice_ethdev.c | 12 ++++++------
drivers/net/ice/ice_ethdev.h | 2 +-
7 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 0e14b93..c17f1bf 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -235,7 +235,7 @@ struct ice_lock {
ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
struct ice_dma_mem *mem, u64 size)
{
- static uint64_t ice_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) ice_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -243,7 +243,7 @@ struct ice_lock {
return NULL;
snprintf(z_name, sizeof(z_name), "ice_dma_%" PRIu64,
- __atomic_fetch_add(&ice_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&ice_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
0, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7f8f516..204d4ea 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -764,7 +764,7 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
rte_spinlock_init(&hw->vc_cmd_queue_lock);
TAILQ_INIT(&hw->vc_cmd_queue);
- __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->vsi_update_thread_num, 0, rte_memory_order_relaxed);
hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
if (hw->arq_buf == NULL) {
@@ -888,8 +888,8 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
ice_dcf_dev_interrupt_handler, hw);
/* Wait for all `ice-thread` threads to exit. */
- while (__atomic_load_n(&hw->vsi_update_thread_num,
- __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(&hw->vsi_update_thread_num,
+ rte_memory_order_acquire) != 0)
rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
ice_dcf_mode_disable(hw);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index aa2a723..7726681 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -105,7 +105,7 @@ struct ice_dcf_hw {
void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen);
- int vsi_update_thread_num;
+ RTE_ATOMIC(int) vsi_update_thread_num;
uint8_t *arq_buf;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index d58ec9d..8f3a385 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -1743,7 +1743,7 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
{
return ad->hw.dcf_enabled &&
- !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+ !rte_atomic_load_explicit(&ad->dcf_state_on, rte_memory_order_relaxed);
}
/* Add UDP tunneling port */
@@ -1944,12 +1944,12 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
return -1;
}
- __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true, rte_memory_order_relaxed);
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 6e845f4..a478b69 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -123,8 +123,8 @@ struct ice_dcf_reset_event_param {
container_of(hw, struct ice_dcf_adapter, real_hw);
struct ice_adapter *parent_adapter = &adapter->parent;
- __atomic_fetch_add(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_relaxed);
rte_thread_detach(rte_thread_self());
@@ -133,8 +133,8 @@ struct ice_dcf_reset_event_param {
rte_spinlock_lock(&vsi_update_lock);
if (!ice_dcf_handle_vsi_update_event(hw)) {
- __atomic_store_n(&parent_adapter->dcf_state_on, true,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true,
+ rte_memory_order_relaxed);
ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
hw->num_vfs, hw->vf_vsi_map);
}
@@ -156,8 +156,8 @@ struct ice_dcf_reset_event_param {
free(param);
- __atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_release);
return 0;
}
@@ -269,8 +269,8 @@ struct ice_dcf_reset_event_param {
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
start_vsi_reset_thread(dcf_hw, true,
pf_msg->event_data.vf_vsi_map.vf_id);
break;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 87385d2..0f35c6a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4062,9 +4062,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = &dev->data->dev_link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
@@ -4078,9 +4078,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 984479a..d73faae 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -621,7 +621,7 @@ struct ice_adapter {
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
/* True if DCF state of the associated PF is on */
- bool dcf_state_on;
+ RTE_ATOMIC(bool) dcf_state_on;
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
--
1.8.3.1
* [PATCH v5 05/45] net/i40e: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (3 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 04/45] net/ice: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 06/45] net/hns3: " Tyler Retzlaff
` (40 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
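The fence conversion below is mechanical; for reference, a minimal sketch of the descriptor-polling idiom it preserves (the two-word descriptor layout is illustrative, not the i40e one):
#include <stdint.h>
#include <rte_stdatomic.h>
struct rx_desc {                  /* illustrative descriptor */
	uint64_t qword0;
	uint64_t qword1;              /* carries the DD (descriptor done) bit */
};
#define DD_BIT (1ULL << 0)
static inline int
desc_done(volatile struct rx_desc *d, uint64_t *qw0)
{
	if (!(d->qword1 & DD_BIT))
		return 0;
	/* Acquire fence: qword0 must not be loaded before the DD bit. */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	*qw0 = d->qword0;
	return 1;
}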
drivers/net/i40e/i40e_ethdev.c | 4 ++--
drivers/net/i40e/i40e_rxtx.c | 6 +++---
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 380ce1a..801cc95 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -4687,7 +4687,7 @@ enum i40e_status_code
u64 size,
u32 alignment)
{
- static uint64_t i40e_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) i40e_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -4695,7 +4695,7 @@ enum i40e_status_code
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
- __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&i40e_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 5d25ab4..155f243 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -486,7 +486,7 @@
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
@@ -745,7 +745,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
@@ -867,7 +867,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index d873e30..3a99137 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -425,7 +425,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
--
1.8.3.1
* [PATCH v5 06/45] net/hns3: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (4 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 05/45] net/i40e: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 07/45] net/bnxt: " Tyler Retzlaff
` (39 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
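For reference, a minimal sketch mirroring the hns3_atomic_*_bit helpers converted in hns3_ethdev.h below (helper names shortened here):
#include <stdint.h>
#include <rte_stdatomic.h>
/* Set bit nr; was __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED). */
static inline void
atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
	rte_atomic_fetch_or_explicit(addr, 1UL << nr, rte_memory_order_relaxed);
}
/* Clear bit nr and return its previous value (non-zero if it was set). */
static inline uint64_t
atomic_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
	uint64_t mask = 1UL << nr;
	/* was: __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask */
	return rte_atomic_fetch_and_explicit(addr, ~mask,
					     rte_memory_order_relaxed) & mask;
}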
drivers/net/hns3/hns3_cmd.c | 18 ++++++------
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 +++++++++++------------
drivers/net/hns3/hns3_ethdev.h | 32 ++++++++++-----------
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++++++++++++++++++--------------------
drivers/net/hns3/hns3_intr.c | 36 +++++++++++------------
drivers/net/hns3/hns3_intr.h | 4 +--
drivers/net/hns3/hns3_mbx.c | 6 ++--
drivers/net/hns3/hns3_mp.c | 6 ++--
drivers/net/hns3/hns3_rxtx.c | 10 +++----
drivers/net/hns3/hns3_tm.c | 4 +--
11 files changed, 107 insertions(+), 107 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 001ff49..3c5fdbe 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,12 @@
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
uint64_t size, uint32_t alignment)
{
- static uint64_t hns3_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
- __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
@@ -198,8 +198,8 @@
hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
csq->next_to_use, csq->next_to_clean);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- __atomic_store_n(&hw->reset.disable_cmd, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+ rte_memory_order_relaxed);
hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}
@@ -313,7 +313,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (hns3_cmd_csq_done(hw))
return 0;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw,
"Don't wait for reply because of disable_cmd");
return -EBUSY;
@@ -360,7 +360,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
int retval;
uint32_t ntc;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -747,7 +747,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
ret = -EBUSY;
goto err_cmd_init;
}
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
ret = hns3_cmd_query_firmware_version_and_capability(hw);
if (ret) {
@@ -790,7 +790,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
return 0;
err_cmd_init:
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return ret;
}
@@ -819,7 +819,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (!hns->is_vf)
(void)hns3_firmware_compat_config(hw, false);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
/*
* A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 915e4eb..2f917fe 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,7 @@
* and configured directly to the hardware in the RESET_STAGE_RESTORE
* stage of the reset process.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a..327f6fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ struct hns3_intr_state {
};
static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
@@ -134,7 +134,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
hw->reset.stats.imp_cnt++;
@@ -148,7 +148,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
hw->reset.stats.global_cnt++;
@@ -1151,7 +1151,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* ensure that the hardware configuration remains unchanged before and
* after reset.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
}
@@ -1175,7 +1175,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* we will restore configurations to hardware in hns3_restore_vlan_table
* and hns3_restore_vlan_conf later.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
if (ret) {
hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5059,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5150,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5158,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
return ret;
hw->mac.link_status = RTE_ETH_LINK_DOWN;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -5184,7 +5184,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_tm_dev_stop_proc(hw);
hns3_config_mac_tnl_int(hw, false);
hns3_stop_tqps(hw);
@@ -5577,7 +5577,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
last_req = hns3_get_reset_level(hns, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -5677,7 +5677,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
}
static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
struct hns3_hw *hw = &hns->hw;
enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5737,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* any mailbox handling or command to firmware is only valid
* after hns3_cmd_init is called.
*/
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hw->reset.stats.request_cnt++;
break;
case HNS3_IMP_RESET:
@@ -5792,7 +5792,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* from table space. Hence, for function reset software intervention is
* required to delete the entries
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -5913,10 +5913,10 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5925,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Check if there is any ongoing reset in the hardware. This status can
@@ -6576,7 +6576,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index a4bc62a..a6b6524 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -401,17 +401,17 @@ enum hns3_schedule {
struct hns3_reset_data {
enum hns3_reset_stage stage;
- uint16_t schedule;
+ RTE_ATOMIC(uint16_t) schedule;
/* Reset flag, covering the entire reset process */
- uint16_t resetting;
+ RTE_ATOMIC(uint16_t) resetting;
/* Used to disable sending cmds during reset */
- uint16_t disable_cmd;
+ RTE_ATOMIC(uint16_t) disable_cmd;
/* The reset level being processed */
enum hns3_reset_level level;
/* Reset level set, each bit represents a reset level */
- uint64_t pending;
+ RTE_ATOMIC(uint64_t) pending;
/* Request reset level set, from interrupt or mailbox */
- uint64_t request;
+ RTE_ATOMIC(uint64_t) request;
int attempts; /* Reset failure retry */
int retries; /* Timeout failure retry in reset_post */
/*
@@ -499,7 +499,7 @@ struct hns3_hw {
* by dev_set_link_up() or dev_start().
*/
bool set_link_down;
- unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+ RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
struct hns3_tqp_stats tqp_stats;
/* Include Mac stats | Rx stats | Tx stats */
struct hns3_mac_stats mac_stats;
@@ -844,7 +844,7 @@ struct hns3_vf {
struct hns3_adapter *adapter;
/* Whether PF support push link status change to VF */
- uint16_t pf_push_lsc_cap;
+ RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
/*
* If PF support push link status change, VF still need send request to
@@ -853,7 +853,7 @@ struct hns3_vf {
*/
uint16_t req_link_info_cnt;
- uint16_t poll_job_started; /* whether poll job is started */
+ RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
};
struct hns3_adapter {
@@ -997,32 +997,32 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
hns3_read_reg((a)->io_base, (reg))
static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t res;
- res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+ res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) & (1UL << nr)) != 0;
return res;
}
static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_or_explicit(addr, (1UL << nr), rte_memory_order_relaxed);
}
static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_and_explicit(addr, ~(1UL << nr), rte_memory_order_relaxed);
}
static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t mask = (1UL << nr);
- return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+ return rte_atomic_fetch_and_explicit(addr, ~mask, rte_memory_order_relaxed) & mask;
}
int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a..b83d5b9 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
};
static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
@@ -484,7 +484,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* MTU value issued by hns3 VF PMD must be less than or equal to
* PF's MTU.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "Failed to set mtu during resetting");
return -EIO;
}
@@ -565,7 +565,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
val = hns3_read_dev(hw, HNS3_VF_RST_ING);
hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
}
static void
@@ -650,8 +650,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
struct hns3_vf_to_pf_msg req;
- __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+ rte_memory_order_release);
hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* mailbox from PF driver to get this capability.
*/
hns3vf_handle_mbx_msg(hw);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
break;
remain_ms--;
@@ -677,10 +677,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* state: unknown (means pf not ack), not_supported, supported.
* Here config it as 'not_supported' when it's 'unknown' state.
*/
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
hns3_info(hw, "detect PF support push link status change!");
} else {
@@ -920,7 +920,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
bool send_req;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return;
send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +956,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* sending request to PF kernel driver, then could update link status by
* process PF kernel driver's link status mailbox message.
*/
- if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
return;
if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +994,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_hw *hw = &hns->hw;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw,
"vf set vlan id failed during resetting, vlan_id =%u",
vlan_id);
@@ -1059,7 +1059,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
unsigned int tmp_mask;
int ret = 0;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
mask);
return -EIO;
@@ -1252,7 +1252,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
- __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
hns3vf_service_handler(dev);
}
@@ -1264,7 +1264,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rte_eal_alarm_cancel(hns3vf_service_handler, dev);
- __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
}
static int
@@ -1500,10 +1500,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -1528,7 +1528,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1643,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1773,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -1847,7 +1847,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (ret)
return ret;
}
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return 0;
}
@@ -1888,7 +1888,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* from table space. Hence, for function reset software intervention is
* required to delete the entries.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -2030,7 +2030,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
}
static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
enum hns3_reset_level reset_level;
@@ -2070,10 +2070,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2082,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Hardware reset has been notified, we now have to poll & check if
@@ -2278,7 +2278,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30..26fa2eb 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ enum hns3_hw_err_report_type {
static int
hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
- int num, uint64_t *levels,
+ int num, RTE_ATOMIC(uint64_t) *levels,
enum hns3_hw_err_report_type err_type)
{
const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,7 @@ enum hns3_hw_err_report_type {
hw->reset.request = 0;
hw->reset.pending = 0;
hw->reset.resetting = 0;
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
if (!hw->reset.wait_data) {
@@ -2419,8 +2419,8 @@ enum hns3_hw_err_report_type {
/* Reschedule the reset process after successful initialization */
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+ rte_memory_order_relaxed);
return;
}
@@ -2428,15 +2428,15 @@ enum hns3_hw_err_report_type {
return;
/* Schedule restart alarm if it is not scheduled yet */
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_REQUESTED)
return;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED)
rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}
@@ -2453,11 +2453,11 @@ enum hns3_hw_err_report_type {
return;
}
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
SCHEDULE_NONE)
return;
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}
@@ -2537,7 +2537,7 @@ enum hns3_hw_err_report_type {
}
static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
uint64_t merge_cnt = hw->reset.stats.merge_cnt;
uint64_t tmp;
@@ -2633,7 +2633,7 @@ enum hns3_hw_err_report_type {
* Regardless of whether the execution is successful or not, the
* flow after execution must be continued.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
(void)hns3_cmd_init(hw);
reset_fail:
hw->reset.attempts = 0;
@@ -2661,7 +2661,7 @@ enum hns3_hw_err_report_type {
int ret;
if (hw->reset.stage == RESET_STAGE_NONE) {
- __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_DOWN;
hns3_report_reset_begin(hw);
ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2750,7 @@ enum hns3_hw_err_report_type {
hns3_notify_reset_ready(hw, false);
hns3_clear_reset_level(hw, &hw->reset.pending);
hns3_clear_reset_status(hw);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.attempts = 0;
hw->reset.stats.success_cnt++;
hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2812,7 @@ enum hns3_hw_err_report_type {
hw->reset.mbuf_deferred_free = false;
}
rte_spinlock_unlock(&hw->lock);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_NONE;
hns3_clock_gettime(&tv);
timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c07..1edb07d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
};
int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
void hns3_handle_error(struct hns3_adapter *hns);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc16..10c6e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,7 @@
mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
while (wait_time < mbx_time_limit) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw, "Don't wait for mbx response because of "
"disable_cmd");
return -EBUSY;
@@ -382,7 +382,7 @@
rte_spinlock_lock(&hw->cmq.crq.lock);
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
@@ -457,7 +457,7 @@
}
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f194..ba8f8ec 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,7 @@
int i;
if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
- __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+ rte_atomic_load_explicit(&hw->secondary_cnt, rte_memory_order_relaxed) == 0)
return;
if (!mp_req_type_is_valid(type)) {
@@ -277,7 +277,7 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev)
ret);
return ret;
}
- __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
} else {
ret = hns3_mp_init_primary();
if (ret) {
@@ -297,7 +297,7 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
process_data.eth_dev_cnt--;
if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 7e636a0..73a388b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4464,7 +4464,7 @@
struct hns3_adapter *hns = eth_dev->data->dev_private;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+ rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4530,7 +4530,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4586,7 +4586,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4615,7 +4615,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4648,7 +4648,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d969164..92a6685 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
/* don't goto fail_clear, user may try later */
@@ -1141,7 +1141,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
return -EBUSY;
--
1.8.3.1
* [PATCH v5 07/45] net/bnxt: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (5 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 06/45] net/hns3: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 08/45] net/cpfl: " Tyler Retzlaff
` (38 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
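For reviewers, a minimal sketch of the completion-ring validity check converted in bnxt_cpr.h below (the completion layout and macro names here are simplified placeholders):
#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
struct cmpl {                      /* simplified completion entry */
	uint32_t info3_v;              /* valid bit toggles on each ring pass */
};
#define CMPL_VALID_BIT 1u
static inline bool
cmpl_valid(volatile const struct cmpl *c, uint32_t raw_cons, uint32_t ring_size)
{
	bool expected = !(raw_cons & ring_size);
	bool valid = !!(c->info3_v & CMPL_VALID_BIT);
	if (valid != expected)
		return false;
	/* Acquire fence: later loads of the completion fields must not be
	 * hoisted above the load of the valid bit.
	 */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	return true;
}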
drivers/net/bnxt/bnxt_cpr.h | 4 ++--
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 ++++++++-----
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 ++--
5 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index c7b3480..43f06fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -107,7 +107,7 @@ struct bnxt_cp_ring_info {
/**
* Check validity of a completion ring entry. If the entry is valid, include a
- * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * C11 rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
* completion are not hoisted by the compiler or by the CPU to come before the
* loading of the "valid" field.
*
@@ -130,7 +130,7 @@ struct bnxt_cp_ring_info {
expected = !(raw_cons & ring_size);
valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
if (valid == expected) {
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
return true;
}
return false;
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 77bc382..36e0ac3 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- uint64_t rx_mbuf_alloc_fail;
+ RTE_ATOMIC(uint64_t) rx_mbuf_alloc_fail;
uint8_t need_realloc;
const struct rte_memzone *mz;
};
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 957b7d6..69e8384 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -49,7 +49,8 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -86,7 +87,8 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -465,7 +467,8 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -1677,8 +1680,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return -ENOMEM;
}
}
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 775400f..04864e0 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -240,7 +240,7 @@
rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
/* Use acquire fence to order loads of descriptor words. */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Reload lower 64b of descriptors to make it ordered after info3_v. */
rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
((void *)&cpr->cp_desc_ring[cons + 7],
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6a6feab..479f819 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -663,7 +663,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -724,7 +724,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
--
1.8.3.1
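For context, a minimal standalone sketch of the completion-ring validity check the bnxt hunks above convert, assuming only the rte_stdatomic.h/rte_atomic.h wrappers this series targets. The names cmpl_sketch and cmpl_valid are illustrative (not driver symbols), and the 0x1 valid-bit mask stands in for the driver's CMPL_BASE_V:

#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>
#include <rte_byteorder.h>

struct cmpl_sketch {
	uint32_t info3_v;	/* low bit toggles each time the ring wraps */
	uint32_t payload;	/* only meaningful once the entry is valid */
};

/* Return true once the entry at raw_cons has been written by the NIC. */
static inline bool
cmpl_valid(const struct cmpl_sketch *c, uint32_t raw_cons, uint32_t ring_size)
{
	bool expected = !(raw_cons & ring_size);
	bool valid = !!(rte_le_to_cpu_32(c->info3_v) & 0x1);

	if (valid == expected) {
		/* Keep later loads of the completion fields from being
		 * hoisted above the load of the valid bit. */
		rte_atomic_thread_fence(rte_memory_order_acquire);
		return true;
	}
	return false;
}

The acquire fence is what replaces the old __ATOMIC_ACQUIRE constant; the surrounding logic is unchanged by the conversion.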
* [PATCH v5 08/45] net/cpfl: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (6 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 07/45] net/bnxt: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 09/45] net/af_xdp: " Tyler Retzlaff
` (37 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/cpfl/cpfl_ethdev.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ef19aa1..5b47e22 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -300,8 +300,9 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed +=
+ rte_atomic_load_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -349,7 +350,8 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
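A minimal sketch of the statistics pattern converted above: per-queue failure counters are summed and reset with relaxed atomics because they only feed xstats and never order other data. The names rx_queue_sketch, sum_alloc_failed and reset_alloc_failed are illustrative, not cpfl symbols:

#include <stdint.h>
#include <rte_stdatomic.h>

struct rx_queue_sketch {
	RTE_ATOMIC(uint64_t) mbuf_alloc_failed;	/* bumped on the datapath */
};

/* Sum the per-queue counters; relaxed loads are enough because the
 * value is purely informational. */
static uint64_t
sum_alloc_failed(struct rx_queue_sketch *q[], unsigned int nb_q)
{
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < nb_q; i++)
		total += rte_atomic_load_explicit(&q[i]->mbuf_alloc_failed,
		    rte_memory_order_relaxed);
	return total;
}

/* Stats reset uses a relaxed store for the same reason. */
static void
reset_alloc_failed(struct rx_queue_sketch *q[], unsigned int nb_q)
{
	unsigned int i;

	for (i = 0; i < nb_q; i++)
		rte_atomic_store_explicit(&q[i]->mbuf_alloc_failed, 0,
		    rte_memory_order_relaxed);
}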
* [PATCH v5 09/45] net/af_xdp: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (7 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 08/45] net/cpfl: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 10/45] net/octeon_ep: " Tyler Retzlaff
` (36 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 268a130..4833180 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -116,7 +116,7 @@ struct xsk_umem_info {
const struct rte_memzone *mz;
struct rte_mempool *mb_pool;
void *buffer;
- uint8_t refcnt;
+ RTE_ATOMIC(uint8_t) refcnt;
uint32_t max_xsks;
};
@@ -995,7 +995,8 @@ static int link_xdp_prog_with_dev(int ifindex, int fd, __u32 flags)
break;
xsk_socket__delete(rxq->xsk);
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1,
+ rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
/* free pkt_tx_queue */
@@ -1097,8 +1098,8 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
ret = -1;
goto out;
}
- if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
- __ATOMIC_ACQUIRE)) {
+ if (rte_atomic_load_explicit(&internals->rx_queues[i].umem->refcnt,
+ rte_memory_order_acquire)) {
*umem = internals->rx_queues[i].umem;
goto out;
}
@@ -1131,11 +1132,11 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
return NULL;
if (umem != NULL &&
- __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
+ rte_atomic_load_explicit(&umem->refcnt, rte_memory_order_acquire) <
umem->max_xsks) {
AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
internals->if_name, rxq->xsk_queue_idx);
- __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&umem->refcnt, 1, rte_memory_order_acquire);
}
}
@@ -1177,7 +1178,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
mb_pool->name, umem->max_xsks);
}
- __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&umem->refcnt, 1, rte_memory_order_release);
}
return umem;
@@ -1606,7 +1607,8 @@ struct msg_internal {
if (rxq->umem == NULL)
return -ENOMEM;
txq->umem = rxq->umem;
- reserve_before = __atomic_load_n(&rxq->umem->refcnt, __ATOMIC_ACQUIRE) <= 1;
+ reserve_before = rte_atomic_load_explicit(&rxq->umem->refcnt,
+ rte_memory_order_acquire) <= 1;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
@@ -1723,7 +1725,7 @@ struct msg_internal {
out_xsk:
xsk_socket__delete(rxq->xsk);
out_umem:
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1, rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
return ret;
--
1.8.3.1
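A minimal sketch of the shared-UMEM reference counting converted above, assuming the rte_stdatomic.h wrappers; umem_sketch, umem_get, umem_put and umem_destroy are illustrative names, not af_xdp symbols. Note that rte_atomic_fetch_sub_explicit returns the value before the subtraction, hence the "- 1 == 0" last-owner test mirrored from the driver:

#include <stdint.h>
#include <rte_stdatomic.h>

struct umem_sketch {
	RTE_ATOMIC(uint8_t) refcnt;
	/* buffer, mempool, fill/completion rings, ... */
};

static void
umem_destroy(struct umem_sketch *umem)
{
	/* tear down rings and memory; body elided in this sketch */
	(void)umem;
}

/* Take an extra reference on an already shared UMEM. */
static inline void
umem_get(struct umem_sketch *umem)
{
	rte_atomic_fetch_add_explicit(&umem->refcnt, 1,
	    rte_memory_order_acquire);
}

/* Drop a reference; the last owner frees the UMEM. */
static inline void
umem_put(struct umem_sketch *umem)
{
	if (rte_atomic_fetch_sub_explicit(&umem->refcnt, 1,
	    rte_memory_order_acquire) - 1 == 0)
		umem_destroy(umem);
}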
* [PATCH v5 10/45] net/octeon_ep: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (8 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 09/45] net/af_xdp: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 11/45] net/octeontx: " Tyler Retzlaff
` (35 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +++--
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +++--
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx2_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx_ep_common.h | 4 ++--
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 ++++--
6 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.h b/drivers/net/octeon_ep/cnxk_ep_rx.h
index ecf95cd..9422042 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.h
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.h
@@ -98,7 +98,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(droq->pkts_sent_ism, rte_memory_order_relaxed);
new_pkts = val - droq->pkts_sent_prev;
droq->pkts_sent_prev = val;
@@ -111,7 +111,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_tx.c b/drivers/net/octeon_ep/cnxk_ep_tx.c
index 233c8aa..e093140 100644
--- a/drivers/net/octeon_ep/cnxk_ep_tx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_tx.c
@@ -15,7 +15,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(iq->inst_cnt_ism, rte_memory_order_relaxed);
iq->inst_cnt += val - iq->inst_cnt_prev;
iq->inst_cnt_prev = val;
@@ -27,7 +27,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 39f357e..39b28de 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -150,10 +150,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
- (void *)iq->inst_cnt_ism, ism_addr);
+ (void *)(uintptr_t)iq->inst_cnt_ism, ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
@@ -235,10 +235,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
- oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ oq_no, (void *)(uintptr_t)droq->pkts_sent_ism, ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 25e0e5a..2aeebb4 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -300,10 +300,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
- (void *)iq->inst_cnt_ism,
+ (void *)(uintptr_t)iq->inst_cnt_ism,
(unsigned int)ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
@@ -386,10 +386,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
- (void *)droq->pkts_sent_ism,
+ (void *)(uintptr_t)droq->pkts_sent_ism,
(unsigned int)ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 7776940..73eb0c9 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -218,7 +218,7 @@ struct otx_ep_iq_config {
*/
struct otx_ep_instr_queue {
/* Location in memory updated by SDP ISM */
- uint32_t *inst_cnt_ism;
+ RTE_ATOMIC(uint32_t) *inst_cnt_ism;
struct rte_mbuf **mbuf_list;
/* Pointer to the Virtual Base addr of the input ring. */
uint8_t *base_addr;
@@ -413,7 +413,7 @@ struct otx_ep_droq {
uint8_t ism_ena;
/* Pointer to host memory copy of output packet count, set by ISM */
- uint32_t *pkts_sent_ism;
+ RTE_ATOMIC(uint32_t) *pkts_sent_ism;
uint32_t pkts_sent_prev;
/* Statistics for this DROQ. */
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 59144e0..eb2d8c1 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -475,7 +475,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
@@ -871,7 +872,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
--
1.8.3.1
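A minimal sketch of the ISM counter pattern converted above, assuming the rte_stdatomic.h wrappers; droq_sketch and droq_new_pkts are illustrative names. Declaring the pointed-to counter as RTE_ATOMIC(uint32_t) is what lets the driver replace the __atomic_load_n builtin without a cast at every poll site:

#include <stdint.h>
#include <rte_stdatomic.h>

struct droq_sketch {
	RTE_ATOMIC(uint32_t) *pkts_sent_ism;	/* DMA target in host memory */
	uint32_t pkts_sent_prev;
};

/* The device DMAs an updated packet count into the ISM location; the
 * host only polls it, so a relaxed load is sufficient. */
static inline uint32_t
droq_new_pkts(struct droq_sketch *droq)
{
	uint32_t val = rte_atomic_load_explicit(droq->pkts_sent_ism,
	    rte_memory_order_relaxed);
	uint32_t new_pkts = val - droq->pkts_sent_prev;

	droq->pkts_sent_prev = val;
	return new_pkts;
}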
* [PATCH v5 11/45] net/octeontx: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (9 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 10/45] net/octeon_ep: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 12/45] net/cxgbe: " Tyler Retzlaff
` (34 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/octeontx/octeontx_ethdev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index e397136..3c21540 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -31,7 +31,7 @@
/* Useful in stopping/closing event device if no of
* eth ports are using it.
*/
-uint16_t evdev_refcnt;
+RTE_ATOMIC(uint16_t) evdev_refcnt;
#define OCTEONTX_QLM_MODE_SGMII 7
#define OCTEONTX_QLM_MODE_XFI 12
@@ -559,7 +559,7 @@ enum octeontx_link_speed {
return 0;
/* Stopping/closing event device once all eth ports are closed. */
- if (__atomic_fetch_sub(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&evdev_refcnt, 1, rte_memory_order_acquire) - 1 == 0) {
rte_event_dev_stop(nic->evdev);
rte_event_dev_close(nic->evdev);
}
@@ -1593,7 +1593,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
nic->pko_vfid = pko_vfid;
nic->port_id = port;
nic->evdev = evdev;
- __atomic_fetch_add(&evdev_refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&evdev_refcnt, 1, rte_memory_order_acquire);
res = octeontx_port_open(nic);
if (res < 0)
@@ -1844,7 +1844,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
}
}
- __atomic_store_n(&evdev_refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&evdev_refcnt, 0, rte_memory_order_release);
/*
* Do 1:1 links for ports & queues. All queues would be mapped to
* one port. If there are more ports than queues, then some ports
--
1.8.3.1
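A minimal sketch of the shared event-device reference count converted above; evdev_refcnt_sketch, evdev_ref and evdev_unref are illustrative names, not octeontx symbols:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

/* One event device is shared by every ethdev port of the PMD; the last
 * port to close stops and closes it. */
static RTE_ATOMIC(uint16_t) evdev_refcnt_sketch;

static inline void
evdev_ref(void)
{
	rte_atomic_fetch_add_explicit(&evdev_refcnt_sketch, 1,
	    rte_memory_order_acquire);
}

/* Return true when the caller dropped the last reference and therefore
 * owns the shutdown of the shared event device. */
static inline bool
evdev_unref(void)
{
	return rte_atomic_fetch_sub_explicit(&evdev_refcnt_sketch, 1,
	    rte_memory_order_acquire) - 1 == 0;
}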
* [PATCH v5 12/45] net/cxgbe: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (10 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 11/45] net/octeontx: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 13/45] net/gve: " Tyler Retzlaff
` (33 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/cxgbe/clip_tbl.c | 12 ++++++------
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 ++++++++++----------
drivers/net/cxgbe/cxgbe_ofld.h | 6 +++---
drivers/net/cxgbe/l2t.c | 12 ++++++------
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +++++++++++----------
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 ++++++------
drivers/net/cxgbe/smt.h | 2 +-
10 files changed, 46 insertions(+), 45 deletions(-)
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
index b709e26..8588b88 100644
--- a/drivers/net/cxgbe/clip_tbl.c
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -55,7 +55,7 @@ void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
int ret;
t4_os_lock(&ce->lock);
- if (__atomic_fetch_sub(&ce->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ce->refcnt, 1, rte_memory_order_relaxed) - 1 == 0) {
ret = clip6_release_mbox(dev, ce->addr);
if (ret)
dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
@@ -79,7 +79,7 @@ static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
unsigned int clipt_size = c->clipt_size;
for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -114,12 +114,12 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce = find_or_alloc_clipe(ctbl, lip);
if (ce) {
t4_os_lock(&ce->lock);
- if (__atomic_load_n(&ce->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&ce->refcnt, rte_memory_order_relaxed) == 0) {
rte_memcpy(ce->addr, lip, sizeof(ce->addr));
if (v6) {
ce->type = FILTER_TYPE_IPV6;
- __atomic_store_n(&ce->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ce->refcnt, 1,
+ rte_memory_order_relaxed);
ret = clip6_get_mbox(dev, lip);
if (ret)
dev_debug(adap,
@@ -129,7 +129,7 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce->type = FILTER_TYPE_IPV4;
}
} else {
- __atomic_fetch_add(&ce->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ce->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&ce->lock);
}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
index 3b2be66..439fcf6 100644
--- a/drivers/net/cxgbe/clip_tbl.h
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -13,7 +13,7 @@ struct clip_entry {
enum filter_type type; /* entry type */
u32 addr[4]; /* IPV4 or IPV6 address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct clip_tbl {
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index c479454..2ed21f2 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -418,15 +418,15 @@ void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- __atomic_fetch_sub(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
}
@@ -448,15 +448,15 @@ void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
t->tid_tab[tid] = data;
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
- __atomic_fetch_add(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index 7a4e30d..fd1e7d8 100644
--- a/drivers/net/cxgbe/cxgbe_ofld.h
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -60,10 +60,10 @@ struct tid_info {
unsigned int atids_in_use;
/* TIDs in the TCAM */
- u32 tids_in_use;
+ RTE_ATOMIC(u32) tids_in_use;
/* TIDs in the HASH */
- u32 hash_tids_in_use;
- u32 conns_in_use;
+ RTE_ATOMIC(u32) hash_tids_in_use;
+ RTE_ATOMIC(u32) conns_in_use;
alignas(RTE_CACHE_LINE_SIZE) rte_spinlock_t atid_lock;
rte_spinlock_t ftid_lock;
diff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c
index 21f4019..ecb5fec 100644
--- a/drivers/net/cxgbe/l2t.c
+++ b/drivers/net/cxgbe/l2t.c
@@ -14,8 +14,8 @@
*/
void cxgbe_l2t_release(struct l2t_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -112,7 +112,7 @@ static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
struct l2t_entry *first_free = NULL;
for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -151,18 +151,18 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)
dev_debug(adap, "Failed to write L2T entry: %d",
ret);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
diff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h
index e4c0ebe..67d0197 100644
--- a/drivers/net/cxgbe/l2t.h
+++ b/drivers/net/cxgbe/l2t.h
@@ -30,7 +30,7 @@ struct l2t_entry {
u8 lport; /* destination port */
u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct l2t_data {
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
index 8e0da9c..79a7daa 100644
--- a/drivers/net/cxgbe/mps_tcam.c
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -76,7 +76,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- __atomic_fetch_add(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
@@ -98,7 +98,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
entry = &mpstcam->entry[ret];
memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
if (cxgbe_update_free_idx(mpstcam))
@@ -147,7 +147,7 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
* provided value is -1
*/
if (entry->state == MPS_ENTRY_UNUSED) {
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
}
@@ -165,7 +165,7 @@ static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_UNUSED;
}
@@ -190,12 +190,13 @@ int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
return -EINVAL;
}
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
entry->mask, idx, 1, pi->port_id,
false);
else
- ret = __atomic_fetch_sub(&entry->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&entry->refcnt, 1,
+ rte_memory_order_relaxed) - 1;
if (ret == 0) {
reset_mpstcam_entry(entry);
@@ -222,7 +223,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
goto out_unlock;
ret = t4_alloc_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -231,7 +232,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
@@ -253,7 +254,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) != 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) != 1)
goto out_unlock;
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -262,7 +263,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
diff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h
index 363786b..4b421f7 100644
--- a/drivers/net/cxgbe/mps_tcam.h
+++ b/drivers/net/cxgbe/mps_tcam.h
@@ -29,7 +29,7 @@ struct mps_tcam_entry {
u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
};
struct mpstcam_table {
diff --git a/drivers/net/cxgbe/smt.c b/drivers/net/cxgbe/smt.c
index 4e14a73..2f961c1 100644
--- a/drivers/net/cxgbe/smt.c
+++ b/drivers/net/cxgbe/smt.c
@@ -119,7 +119,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end, *first_free = NULL;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -156,7 +156,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_smte(s, smac);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->pfvf = pfvf;
rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
ret = write_smt_entry(dev, e);
@@ -168,9 +168,9 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
goto out_write_unlock;
}
e->state = SMT_STATE_SWITCHING;
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
@@ -195,8 +195,8 @@ struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
void cxgbe_smt_release(struct smt_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/smt.h b/drivers/net/cxgbe/smt.h
index 531810e..8b378ae 100644
--- a/drivers/net/cxgbe/smt.h
+++ b/drivers/net/cxgbe/smt.h
@@ -23,7 +23,7 @@ struct smt_entry {
u16 pfvf;
u16 hw_idx;
u8 src_mac[RTE_ETHER_ADDR_LEN];
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
rte_spinlock_t lock;
};
--
1.8.3.1
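A minimal sketch of the cxgbe-style entry reference counting converted above: each table entry is manipulated under a per-entry spinlock, so the lock already orders the entry contents and the counter itself only needs relaxed atomic accesses. entry_sketch and entry_get are illustrative names, not cxgbe symbols:

#include <stdint.h>
#include <rte_spinlock.h>
#include <rte_stdatomic.h>

struct entry_sketch {
	rte_spinlock_t lock;		/* protects the entry contents */
	RTE_ATOMIC(uint32_t) refcnt;	/* also read without the lock */
};

/* Claim a free entry or re-reference a used one. */
static inline void
entry_get(struct entry_sketch *e)
{
	rte_spinlock_lock(&e->lock);
	if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0)
		rte_atomic_store_explicit(&e->refcnt, 1,
		    rte_memory_order_relaxed);	/* first user initialises */
	else
		rte_atomic_fetch_add_explicit(&e->refcnt, 1,
		    rte_memory_order_relaxed);
	rte_spinlock_unlock(&e->lock);
}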
* [PATCH v5 13/45] net/gve: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (11 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 12/45] net/cxgbe: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 14/45] net/memif: " Tyler Retzlaff
` (32 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/gve/base/gve_osdep.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/gve/base/gve_osdep.h b/drivers/net/gve/base/gve_osdep.h
index a3702f4..c0ee0d5 100644
--- a/drivers/net/gve/base/gve_osdep.h
+++ b/drivers/net/gve/base/gve_osdep.h
@@ -135,7 +135,7 @@ struct gve_dma_mem {
static inline void *
gve_alloc_dma_mem(struct gve_dma_mem *mem, u64 size)
{
- static uint16_t gve_dma_memzone_id;
+ static RTE_ATOMIC(uint16_t) gve_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -143,7 +143,7 @@ struct gve_dma_mem {
return NULL;
snprintf(z_name, sizeof(z_name), "gve_dma_%u",
- __atomic_fetch_add(&gve_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&gve_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG,
PAGE_SIZE);
--
1.8.3.1
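A minimal sketch of the unique-name counter converted above; dma_zone_name is an illustrative helper, not a gve symbol. A relaxed fetch-and-add is enough because the counter only has to hand out distinct ids, it never synchronizes other data:

#include <stdio.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static inline void
dma_zone_name(char *buf, size_t len)
{
	static RTE_ATOMIC(uint16_t) zone_id;

	/* unique even when several queues are set up concurrently */
	snprintf(buf, len, "gve_dma_%u",
	    (unsigned int)rte_atomic_fetch_add_explicit(&zone_id, 1,
	        rte_memory_order_relaxed));
}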
* [PATCH v5 14/45] net/memif: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (12 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 13/45] net/gve: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 15/45] net/thunderx: " Tyler Retzlaff
` (31 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/memif/memif.h | 4 ++--
drivers/net/memif/rte_eth_memif.c | 50 +++++++++++++++++++--------------------
2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/net/memif/memif.h b/drivers/net/memif/memif.h
index f5a4693..3f5b407 100644
--- a/drivers/net/memif/memif.h
+++ b/drivers/net/memif/memif.h
@@ -169,9 +169,9 @@ typedef struct __rte_packed __rte_aligned(128)
uint32_t cookie; /**< MEMIF_COOKIE */
uint16_t flags; /**< flags */
#define MEMIF_RING_FLAG_MASK_INT 1 /**< disable interrupt mode */
- uint16_t head; /**< pointer to ring buffer head */
+ RTE_ATOMIC(uint16_t) head; /**< pointer to ring buffer head */
MEMIF_CACHELINE_ALIGN_MARK(cacheline1);
- uint16_t tail; /**< pointer to ring buffer tail */
+ RTE_ATOMIC(uint16_t) tail; /**< pointer to ring buffer tail */
MEMIF_CACHELINE_ALIGN_MARK(cacheline2);
memif_desc_t desc[0]; /**< buffer descriptors */
} memif_ring_t;
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 18377d9..16da22b 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -262,7 +262,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* in function eth_memif_rx for C2S queues.
*/
- cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ cur_tail = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
while (mq->last_tail != cur_tail) {
RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);
@@ -334,10 +334,10 @@ struct mp_region_msg {
if (type == MEMIF_RING_C2S) {
cur_slot = mq->last_head;
- last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire);
} else {
cur_slot = mq->last_tail;
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
}
if (cur_slot == last_slot)
@@ -473,7 +473,7 @@ struct mp_region_msg {
no_free_bufs:
if (type == MEMIF_RING_C2S) {
- __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, cur_slot, rte_memory_order_release);
mq->last_head = cur_slot;
} else {
mq->last_tail = cur_slot;
@@ -485,7 +485,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
while (n_slots--) {
@@ -493,7 +493,7 @@ struct mp_region_msg {
d0 = &ring->desc[s0];
d0->length = pmd->run.pkt_buffer_size;
}
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
}
mq->n_pkts += n_rx_pkts;
@@ -541,7 +541,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* to synchronize it between threads.
*/
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
if (cur_slot == last_slot)
goto refill;
n_slots = last_slot - cur_slot;
@@ -591,7 +591,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
if (n_slots < 32)
@@ -620,7 +620,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_tx.
*/
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
mq->n_pkts += n_rx_pkts;
@@ -668,9 +668,9 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot +
- __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
} else {
/* For S2C queues ring->tail is updated by the sender and
* this function is called in the context of sending thread.
@@ -678,8 +678,8 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
- n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
+ slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_relaxed);
+ n_free = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire) - slot;
}
uint16_t i;
@@ -792,9 +792,9 @@ struct mp_region_msg {
no_free_slots:
if (type == MEMIF_RING_C2S)
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
else
- __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, slot, rte_memory_order_release);
if (((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) &&
(rte_intr_fd_get(mq->intr_handle) >= 0)) {
@@ -882,7 +882,7 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot + mq->last_tail;
int used_slots;
@@ -942,7 +942,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_rx for C2S rings.
*/
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
/* Send interrupt, if enabled. */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
@@ -1155,8 +1155,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_c2s_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1175,8 +1175,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_s2c_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1314,8 +1314,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
@@ -1330,8 +1330,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
--
1.8.3.1
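A minimal sketch of the head/tail publication protocol the memif conversion above preserves, assuming the rte_stdatomic.h wrappers; ring_sketch, ring_publish and ring_consume_limit are illustrative names. The release store pairs with the acquire load so descriptor contents written before the head update are guaranteed visible to the peer process:

#include <stdint.h>
#include <rte_stdatomic.h>

struct ring_sketch {
	RTE_ATOMIC(uint16_t) head;	/* advanced by the producer */
	RTE_ATOMIC(uint16_t) tail;	/* advanced by the consumer */
	/* descriptor array follows in shared memory */
};

/* Producer: fill descriptors for the new slots first, then publish
 * them; the release store makes those writes visible to the peer. */
static inline void
ring_publish(struct ring_sketch *r, uint16_t new_head)
{
	rte_atomic_store_explicit(&r->head, new_head,
	    rte_memory_order_release);
}

/* Consumer: the acquire load pairs with the release store above, so
 * any descriptor read after this point is the one the producer wrote. */
static inline uint16_t
ring_consume_limit(struct ring_sketch *r)
{
	return rte_atomic_load_explicit(&r->head, rte_memory_order_acquire);
}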
* [PATCH v5 15/45] net/thunderx: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (13 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 14/45] net/memif: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 16/45] net/virtio: " Tyler Retzlaff
` (30 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/thunderx/nicvf_rxtx.c | 9 +++++----
drivers/net/thunderx/nicvf_struct.h | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 74f43b9..76b6fdb 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -374,8 +374,8 @@ static const alignas(RTE_CACHE_LINE_SIZE) uint32_t ptype_table[16][16] = {
NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
(nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));
- next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
- __ATOMIC_ACQUIRE);
+ next_tail = rte_atomic_fetch_add_explicit(&rbdr->next_tail, to_fill,
+ rte_memory_order_acquire);
ltail = next_tail;
for (i = 0; i < to_fill; i++) {
struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);
@@ -385,9 +385,10 @@ static const alignas(RTE_CACHE_LINE_SIZE) uint32_t ptype_table[16][16] = {
ltail++;
}
- rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&rbdr->tail, next_tail,
+ rte_memory_order_relaxed);
- __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&rbdr->tail, ltail, rte_memory_order_release);
nicvf_addr_write(door, to_fill);
return to_fill;
}
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index cfcd942..60d3ec0 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -20,8 +20,8 @@ struct __rte_cache_aligned nicvf_rbdr {
struct rbdr_entry_t *desc;
nicvf_iova_addr_t phys;
uint32_t buffsz;
- uint32_t tail;
- uint32_t next_tail;
+ RTE_ATOMIC(uint32_t) tail;
+ RTE_ATOMIC(uint32_t) next_tail;
uint32_t head;
uint32_t qlen_mask;
};
--
1.8.3.1
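A minimal sketch of the multi-producer reserve/publish scheme used for the RBDR tail above; rbdr_sketch, rbdr_reserve and rbdr_publish are illustrative names, and the open-coded wait loop stands in for the driver's rte_wait_until_equal_32() call (which is why the patch casts the now-RTE_ATOMIC tail back to a plain uint32_t pointer):

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

struct rbdr_sketch {
	RTE_ATOMIC(uint32_t) tail;	/* slots published to hardware */
	RTE_ATOMIC(uint32_t) next_tail;	/* slots reserved by producers */
};

/* Reserve n slots; returns the start of the reserved range. */
static inline uint32_t
rbdr_reserve(struct rbdr_sketch *r, uint32_t n)
{
	return rte_atomic_fetch_add_explicit(&r->next_tail, n,
	    rte_memory_order_acquire);
}

/* Publish the reserved range once its descriptors are filled.  Earlier
 * reservations must be published first, so wait for tail to catch up. */
static inline void
rbdr_publish(struct rbdr_sketch *r, uint32_t start, uint32_t n)
{
	while (rte_atomic_load_explicit(&r->tail,
	    rte_memory_order_relaxed) != start)
		rte_pause();
	rte_atomic_store_explicit(&r->tail, start + n,
	    rte_memory_order_release);
}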
* [PATCH v5 16/45] net/virtio: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (14 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 15/45] net/thunderx: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 17/45] net/hinic: " Tyler Retzlaff
` (29 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/virtio/virtio_ring.h | 4 +--
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 ++++-----
drivers/net/virtio/virtqueue.h | 32 ++++++++++++------------
3 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b..2a25751 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -59,7 +59,7 @@ struct vring_used_elem {
struct vring_used {
uint16_t flags;
- uint16_t idx;
+ RTE_ATOMIC(uint16_t) idx;
struct vring_used_elem ring[];
};
@@ -70,7 +70,7 @@ struct vring_packed_desc {
uint64_t addr;
uint32_t len;
uint16_t id;
- uint16_t flags;
+ RTE_ATOMIC(uint16_t) flags;
};
#define RING_EVENT_FLAGS_ENABLE 0x0
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4fdfe70..24e2b2c 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -948,7 +948,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+ uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -1037,8 +1037,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vring->desc[vq->used_idx].flags, flags,
+ rte_memory_order_release);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {
@@ -1057,9 +1057,9 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
struct vring *vring = &dev->vrings.split[queue_idx];
/* Consume avail ring, using used ring idx as first one */
- while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ while (rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
!= vring->avail->idx) {
- avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ avail_idx = rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
& (vring->num - 1);
desc_idx = vring->avail->ring[avail_idx];
@@ -1070,7 +1070,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
uep->id = desc_idx;
uep->len = n_descs;
- __atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&vring->used->idx, 1, rte_memory_order_relaxed);
}
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 75d70f1..60211a4 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -37,7 +37,7 @@
virtio_mb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
else
rte_mb();
}
@@ -46,7 +46,7 @@
virtio_rmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
else
rte_io_rmb();
}
@@ -55,7 +55,7 @@
virtio_wmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
else
rte_io_wmb();
}
@@ -67,12 +67,12 @@
uint16_t flags;
if (weak_barriers) {
-/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports
+/* x86 prefers to using rte_io_rmb over rte_atomic_load_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+ flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
#else
flags = dp->flags;
rte_io_rmb();
@@ -90,12 +90,12 @@
uint16_t flags, uint8_t weak_barriers)
{
if (weak_barriers) {
-/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports
+/* x86 prefers to using rte_io_wmb over rte_atomic_store_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
#else
rte_io_wmb();
dp->flags = flags;
@@ -425,7 +425,7 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
if (vq->hw->weak_barriers) {
/**
- * x86 prefers to using rte_smp_rmb over __atomic_load_n as it
+ * x86 prefers to using rte_smp_rmb over rte_atomic_load_explicit as it
* reports a slightly better perf, which comes from the saved
* branch by the compiler.
* The if and else branches are identical with the smp and io
@@ -435,8 +435,8 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
idx = vq->vq_split.ring.used->idx;
rte_smp_rmb();
#else
- idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
- __ATOMIC_ACQUIRE);
+ idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,
+ rte_memory_order_acquire);
#endif
} else {
idx = vq->vq_split.ring.used->idx;
@@ -454,7 +454,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
vq_update_avail_idx(struct virtqueue *vq)
{
if (vq->hw->weak_barriers) {
- /* x86 prefers to using rte_smp_wmb over __atomic_store_n as
+ /* x86 prefers to using rte_smp_wmb over rte_atomic_store_explicit as
* it reports a slightly better perf, which comes from the
* saved branch by the compiler.
* The if and else branches are identical with the smp and
@@ -464,8 +464,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
rte_smp_wmb();
vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
#else
- __atomic_store_n(&vq->vq_split.ring.avail->idx,
- vq->vq_avail_idx, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,
+ vq->vq_avail_idx, rte_memory_order_release);
#endif
} else {
rte_io_wmb();
@@ -528,8 +528,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
- used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
- __ATOMIC_RELAXED); \
+ used_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \
+ rte_memory_order_relaxed); \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
if (virtio_with_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
@@ -546,7 +546,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
" avail.flags=0x%x; used.flags=0x%x", \
(vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
(vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
- __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+ rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \
(vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
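For reference, a minimal sketch of the weak-barriers flags read converted above, assuming DPDK's <rte_stdatomic.h> and <rte_atomic.h>; the struct and function names (demo_desc, demo_desc_load_flags) are illustrative only, not driver code:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

struct demo_desc {
	RTE_ATOMIC(uint16_t) flags;
};

/* Read descriptor flags: an explicit acquire load on Arm, otherwise a
 * plain load followed by an I/O read barrier, mirroring virtqueue.h. */
static inline uint16_t
demo_desc_load_flags(struct demo_desc *dp, uint8_t weak_barriers)
{
	uint16_t flags;

	if (weak_barriers) {
#ifdef RTE_ARCH_ARM
		flags = rte_atomic_load_explicit(&dp->flags,
				rte_memory_order_acquire);
#else
		flags = dp->flags;
		rte_io_rmb();
#endif
	} else {
		flags = dp->flags;
		rte_io_rmb();
	}
	return flags;
}

The non-Arm branch keeps rte_io_rmb() for the ~1.5% gain mentioned in the driver comment (the compiler saves a branch); both forms provide the required ordering on the affected platforms.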
* [PATCH v5 17/45] net/hinic: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (15 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 16/45] net/virtio: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:57 ` [PATCH v5 18/45] net/idpf: " Tyler Retzlaff
` (28 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 7adb6e3..c2cd295 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -1004,7 +1004,7 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
while (pkts < nb_pkts) {
/* 2. current ci is done */
rx_cqe = &rxq->rx_cqe[sw_ci];
- status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
+ status = rte_atomic_load_explicit(&rx_cqe->status, rte_memory_order_acquire);
if (!HINIC_GET_RX_DONE_BE(status))
break;
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 2dde3ec..43c236b 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -33,7 +33,7 @@ struct __rte_cache_aligned hinic_rq_cqe {
#else
struct hinic_rq_cqe {
#endif
- u32 status;
+ RTE_ATOMIC(u32) status;
u32 vlan_len;
u32 offload_type;
u32 rss_hash;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
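As a reference for the hinic change above, here is a minimal sketch of polling a device-written completion word that has been annotated with RTE_ATOMIC(); demo_cqe and the done bit are placeholders, assuming DPDK's <rte_stdatomic.h>:

#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_cqe {
	RTE_ATOMIC(uint32_t) status;	/* written by the device */
	uint32_t vlan_len;
};

/* Return nonzero once the device has set the done bit; the acquire load
 * orders the reads of the rest of the CQE after it. */
static inline int
demo_cqe_done(struct demo_cqe *cqe, uint32_t done_bit)
{
	uint32_t status = rte_atomic_load_explicit(&cqe->status,
			rte_memory_order_acquire);

	return (status & done_bit) != 0;
}

Marking the field with RTE_ATOMIC() is what lets the enforced-stdatomic build accept rte_atomic_load_explicit() on it; on the default build the generated code should match the previous __atomic_load_n().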
* [PATCH v5 18/45] net/idpf: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (16 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 17/45] net/hinic: " Tyler Retzlaff
@ 2024-05-06 17:57 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 19/45] net/qede: " Tyler Retzlaff
` (27 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:57 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/idpf/idpf_ethdev.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 86151c9..1df4d6b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -259,8 +259,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed += rte_atomic_load_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -308,7 +308,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&rxq->rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
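A small sketch of the statistics idiom in the idpf hunks above: relaxed increments on the datapath, relaxed load and store when aggregating or resetting. The struct and function names are made up for illustration:

#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_rxq_stats {
	RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};

/* Datapath side: count a failure; no ordering is needed for a counter. */
static inline void
demo_stats_bump(struct demo_rxq_stats *st)
{
	rte_atomic_fetch_add_explicit(&st->mbuf_alloc_failed, 1,
			rte_memory_order_relaxed);
}

/* Control side: read the counter for xstats. */
static inline uint64_t
demo_stats_read(struct demo_rxq_stats *st)
{
	return rte_atomic_load_explicit(&st->mbuf_alloc_failed,
			rte_memory_order_relaxed);
}

/* Control side: reset, as in the second hunk. */
static inline void
demo_stats_reset(struct demo_rxq_stats *st)
{
	rte_atomic_store_explicit(&st->mbuf_alloc_failed, 0,
			rte_memory_order_relaxed);
}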
* [PATCH v5 19/45] net/qede: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (17 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 18/45] net/idpf: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 20/45] net/ring: " Tyler Retzlaff
` (26 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/qede/base/bcm_osal.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2edeb38..abd1186 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -51,11 +51,11 @@ void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
/* Counter to track current memzone allocated */
static uint16_t ecore_mz_count;
-static uint32_t ref_cnt;
+static RTE_ATOMIC(uint32_t) ref_cnt;
int ecore_mz_mapping_alloc(void)
{
- if (__atomic_fetch_add(&ref_cnt, 1, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_fetch_add_explicit(&ref_cnt, 1, rte_memory_order_relaxed) == 0) {
ecore_mz_mapping = rte_calloc("ecore_mz_map",
rte_memzone_max_get(), sizeof(struct rte_memzone *), 0);
}
@@ -68,7 +68,7 @@ int ecore_mz_mapping_alloc(void)
void ecore_mz_mapping_free(void)
{
- if (__atomic_fetch_sub(&ref_cnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ref_cnt, 1, rte_memory_order_relaxed) - 1 == 0) {
rte_free(ecore_mz_mapping);
ecore_mz_mapping = NULL;
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
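The bcm_osal.c hunks above are the usual shared-resource refcount; a minimal sketch of that idiom with placeholder names follows. Both fetch_add and fetch_sub return the value before the update, which is what makes the 0-to-1 and 1-to-0 edges detectable:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) demo_ref_cnt;

/* Returns true if this caller took the count from 0 to 1 and therefore
 * owns the allocation of the shared resource. */
static inline bool
demo_ref_get(void)
{
	return rte_atomic_fetch_add_explicit(&demo_ref_cnt, 1,
			rte_memory_order_relaxed) == 0;
}

/* Returns true if this caller dropped the last reference and therefore
 * must free the shared resource. */
static inline bool
demo_ref_put(void)
{
	return rte_atomic_fetch_sub_explicit(&demo_ref_cnt, 1,
			rte_memory_order_relaxed) == 1;
}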
* [PATCH v5 20/45] net/ring: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (18 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 19/45] net/qede: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 21/45] vdpa/mlx5: " Tyler Retzlaff
` (25 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/ring/rte_eth_ring.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 48953dd..b16f5d5 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -44,8 +44,8 @@ enum dev_action {
struct ring_queue {
struct rte_ring *rng;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_internals {
@@ -82,7 +82,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts += nb_rx;
else
- __atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->rx_pkts, nb_rx, rte_memory_order_relaxed);
return nb_rx;
}
@@ -96,7 +96,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SP_ENQ)
r->tx_pkts += nb_tx;
else
- __atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->tx_pkts, nb_tx, rte_memory_order_relaxed);
return nb_tx;
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
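A sketch of the counter handling in rte_eth_ring.c above: the packet counter is bumped with a plain add when the ring guarantees a single thread on that side and with a relaxed atomic add otherwise. demo_queue is a placeholder type:

#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_queue {
	int multi_thread;		/* can several threads poll this queue? */
	RTE_ATOMIC(uint64_t) rx_pkts;
};

static inline void
demo_count_rx(struct demo_queue *q, uint64_t n)
{
	if (!q->multi_thread)
		q->rx_pkts += n;	/* single thread on this side */
	else
		rte_atomic_fetch_add_explicit(&q->rx_pkts, n,
				rte_memory_order_relaxed);
}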
* [PATCH v5 21/45] vdpa/mlx5: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (19 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 20/45] net/ring: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 22/45] raw/ifpga: " Tyler Retzlaff
` (24 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +++++++++---------
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +++++------
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 ++-
6 files changed, 52 insertions(+), 44 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@
uint32_t timeout = 0;
/* Check and wait all close tasks done. */
- while (__atomic_load_n(&priv->dev_close_progress,
- __ATOMIC_RELAXED) != 0 && timeout < 1000) {
+ while (rte_atomic_load_explicit(&priv->dev_close_progress,
+ rte_memory_order_relaxed) != 0 && timeout < 1000) {
rte_delay_us_sleep(10000);
timeout++;
}
@@ -294,8 +294,8 @@
priv->last_c_thrd_idx = 0;
else
priv->last_c_thrd_idx++;
- __atomic_store_n(&priv->dev_close_progress,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress,
+ 1, rte_memory_order_relaxed);
if (mlx5_vdpa_task_add(priv,
priv->last_c_thrd_idx,
MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+ rte_memory_order_relaxed);
priv->state = MLX5_VDPA_STATE_PROBED;
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
@@ -664,7 +664,9 @@
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t max_queues, index, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
@@ -847,8 +849,8 @@
if (conf_thread_mng.initializer_priv == priv)
if (mlx5_vdpa_mult_threads_create())
goto error;
- __atomic_fetch_add(&conf_thread_mng.refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+ rte_memory_order_relaxed);
}
if (mlx5_vdpa_create_dev_resources(priv))
goto error;
@@ -937,8 +939,8 @@
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
if (priv->use_c_thread)
- if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
- 1, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+ 1, rte_memory_order_relaxed) == 1)
mlx5_vdpa_mult_threads_destroy(true);
rte_free(priv);
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 4ce6977..e156520 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
struct __rte_aligned(4) mlx5_vdpa_task {
struct mlx5_vdpa_priv *priv;
enum mlx5_vdpa_task_type type;
- uint32_t *remaining_cnt;
- uint32_t *err_cnt;
+ RTE_ATOMIC(uint32_t) *remaining_cnt;
+ RTE_ATOMIC(uint32_t) *err_cnt;
uint32_t idx;
} __rte_packed;
@@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
struct mlx5_vdpa_conf_thread_mng {
void *initializer_priv;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
uint32_t max_thrds;
pthread_mutex_t cthrd_lock;
struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
@@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
uint16_t last_c_thrd_idx;
- uint16_t dev_close_progress;
+ RTE_ATOMIC(uint16_t) dev_close_progress;
uint16_t num_mrs; /* Number of memory regions. */
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
@@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num);
int
mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
void
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num)
{
struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@
return -1;
for (i = 0 ; i < num; i++)
if (task[i].remaining_cnt)
- __atomic_fetch_add(task[i].remaining_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+ rte_memory_order_relaxed);
/* wake up conf thread. */
pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@
}
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
{
/* Check and wait all tasks done. */
- while (__atomic_load_n(remaining_cnt,
- __ATOMIC_RELAXED) != 0) {
+ while (rte_atomic_load_explicit(remaining_cnt,
+ rte_memory_order_relaxed) != 0) {
rte_delay_us_sleep(sleep_time);
}
- if (__atomic_load_n(err_cnt,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(err_cnt,
+ rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Tasks done with error.");
return true;
}
@@ -137,8 +137,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to register mr %d.", task.idx);
- __atomic_fetch_add(task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+ rte_memory_order_relaxed);
}
break;
case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to setup virtq %d.", task.idx);
- __atomic_fetch_add(
- task.err_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ task.err_cnt, 1, rte_memory_order_relaxed);
}
virtq->enable = 1;
pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@
DRV_LOG(ERR,
"Failed to stop virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -176,9 +176,9 @@
DRV_LOG(ERR,
"Failed to get negotiated features virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -200,9 +200,9 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(
+ rte_atomic_store_explicit(
&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
break;
case MLX5_VDPA_TASK_PREPARE_VIRTQ:
ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@
DRV_LOG(ERR,
"Failed to prepare virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
break;
default:
@@ -222,8 +222,8 @@
break;
}
if (task.remaining_cnt)
- __atomic_fetch_sub(task.remaining_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+ 1, rte_memory_order_relaxed);
}
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@
uint8_t mode = 0;
int ret = -rte_errno;
uint32_t i, thrd_idx, data[1];
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
(priv->vid, &mode, &priv->vmem_info.size,
&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@
{
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
struct rte_vhost_vring vq;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
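For the task accounting converted above, a minimal sketch: the submitter bumps a remaining counter per queued task, the worker decrements it when a task completes (and bumps an error counter on failure), and the submitter sleeps until the remaining counter drains. All names are illustrative; the headers assumed are <rte_stdatomic.h> and <rte_cycles.h> for rte_delay_us_sleep():

#include <stdbool.h>
#include <stdint.h>
#include <rte_cycles.h>
#include <rte_stdatomic.h>

static inline void
demo_task_submitted(RTE_ATOMIC(uint32_t) *remaining)
{
	rte_atomic_fetch_add_explicit(remaining, 1, rte_memory_order_relaxed);
}

static inline void
demo_task_done(RTE_ATOMIC(uint32_t) *remaining, RTE_ATOMIC(uint32_t) *err,
		bool failed)
{
	if (failed)
		rte_atomic_fetch_add_explicit(err, 1,
				rte_memory_order_relaxed);
	rte_atomic_fetch_sub_explicit(remaining, 1,
			rte_memory_order_relaxed);
}

/* Returns true if any task reported an error. */
static inline bool
demo_wait_tasks(RTE_ATOMIC(uint32_t) *remaining, RTE_ATOMIC(uint32_t) *err,
		uint32_t sleep_us)
{
	while (rte_atomic_load_explicit(remaining,
			rte_memory_order_relaxed) != 0)
		rte_delay_us_sleep(sleep_us);
	return rte_atomic_load_explicit(err, rte_memory_order_relaxed) != 0;
}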
* [PATCH v5 22/45] raw/ifpga: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (20 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 21/45] vdpa/mlx5: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-07 10:06 ` Xu, Rosen
2024-05-06 17:58 ` [PATCH v5 23/45] event/opdl: " Tyler Retzlaff
` (23 subsequent siblings)
45 siblings, 1 reply; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/raw/ifpga/ifpga_rawdev.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c
index f89bd3f..78d3c88 100644
--- a/drivers/raw/ifpga/ifpga_rawdev.c
+++ b/drivers/raw/ifpga/ifpga_rawdev.c
@@ -73,7 +73,7 @@
static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
-static int ifpga_monitor_refcnt;
+static RTE_ATOMIC(int) ifpga_monitor_refcnt;
static rte_thread_t ifpga_monitor_start_thread;
static struct ifpga_rawdev *
@@ -512,7 +512,7 @@ static int set_surprise_link_check_aer(
int gsd_enable, ret;
#define MS 1000
- while (__atomic_load_n(&ifpga_monitor_refcnt, __ATOMIC_RELAXED)) {
+ while (rte_atomic_load_explicit(&ifpga_monitor_refcnt, rte_memory_order_relaxed)) {
gsd_enable = 0;
for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
ifpga_rdev = &ifpga_rawdevices[i];
@@ -549,7 +549,7 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 1;
- if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_fetch_add_explicit(&ifpga_monitor_refcnt, 1, rte_memory_order_relaxed)) {
ret = rte_thread_create_internal_control(&ifpga_monitor_start_thread,
"ifpga-mon", ifpga_rawdev_gsd_handle, NULL);
if (ret != 0) {
@@ -573,7 +573,8 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 0;
- if (!(__atomic_fetch_sub(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED) - 1) &&
+ if (!(rte_atomic_fetch_sub_explicit(&ifpga_monitor_refcnt, 1,
+ rte_memory_order_relaxed) - 1) &&
ifpga_monitor_start_thread.opaque_id != 0) {
ret = pthread_cancel((pthread_t)ifpga_monitor_start_thread.opaque_id);
if (ret)
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* RE: [PATCH v5 22/45] raw/ifpga: use rte stdatomic API
2024-05-06 17:58 ` [PATCH v5 22/45] raw/ifpga: " Tyler Retzlaff
@ 2024-05-07 10:06 ` Xu, Rosen
0 siblings, 0 replies; 300+ messages in thread
From: Xu, Rosen @ 2024-05-07 10:06 UTC (permalink / raw)
To: Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Sevincer, Abdullah,
Ajit Khaparde, Alok Prasad, Burakov, Anatoly, Andrew Rybchenko,
Anoob Joseph, Richardson, Bruce, Marohn, Byron, Chenbo Xia,
Chengwen Feng, Loftus, Ciara, Power, Ciara, Dariusz Sosnowski,
Hunt, David, Devendra Singh Rawat, Carrillo, Erik G,
Guoyang Zhou, Harman Kalra, Van Haaren, Harry, Nagarahalli,
Honnappa, Jakub Grajciar, Jerin Jacob, Jeroen de Borst,
Jian Wang, Jiawen Wu, Jie Hai, Wu, Jingjing, Joshua Washington,
Joyce Kong, Guo, Junfeng, Laatz, Kevin, Konstantin Ananyev,
Liang Ma, Long Li, Maciej Czekaj, Matan Azrad, Maxime Coquelin,
Chautru, Nicolas, Ori Kam, Pavan Nikhilesh, Mccarthy, Peter,
Rahul Lakkireddy, Pattan, Reshma, Ruifeng Wang, Rushil Gupta,
Gobriel, Sameh, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Medvedkin, Vladimir, Xiaoyun Wang, Wang,
Yipeng1, Yisen Zhuang, Ziyang Xuan
Hi,
> -----Original Message-----
> Subject: [PATCH v5 22/45] raw/ifpga: use rte stdatomic API
>
> Replace the use of gcc builtin __atomic_xxx intrinsics with corresponding
> rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
> drivers/raw/ifpga/ifpga_rawdev.c | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/raw/ifpga/ifpga_rawdev.c
> b/drivers/raw/ifpga/ifpga_rawdev.c
> index f89bd3f..78d3c88 100644
> --- a/drivers/raw/ifpga/ifpga_rawdev.c
> +++ b/drivers/raw/ifpga/ifpga_rawdev.c
> @@ -73,7 +73,7 @@
>
> static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
>
> -static int ifpga_monitor_refcnt;
> +static RTE_ATOMIC(int) ifpga_monitor_refcnt;
> static rte_thread_t ifpga_monitor_start_thread;
>
> static struct ifpga_rawdev *
> @@ -512,7 +512,7 @@ static int set_surprise_link_check_aer(
> int gsd_enable, ret;
> #define MS 1000
>
> - while (__atomic_load_n(&ifpga_monitor_refcnt,
> __ATOMIC_RELAXED)) {
> + while (rte_atomic_load_explicit(&ifpga_monitor_refcnt,
> +rte_memory_order_relaxed)) {
> gsd_enable = 0;
> for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
> ifpga_rdev = &ifpga_rawdevices[i];
> @@ -549,7 +549,7 @@ static int set_surprise_link_check_aer(
>
> dev->poll_enabled = 1;
>
> - if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1,
> __ATOMIC_RELAXED)) {
> + if (!rte_atomic_fetch_add_explicit(&ifpga_monitor_refcnt, 1,
> +rte_memory_order_relaxed)) {
> ret =
> rte_thread_create_internal_control(&ifpga_monitor_start_thread,
> "ifpga-mon", ifpga_rawdev_gsd_handle,
> NULL);
> if (ret != 0) {
> @@ -573,7 +573,8 @@ static int set_surprise_link_check_aer(
>
> dev->poll_enabled = 0;
>
> - if (!(__atomic_fetch_sub(&ifpga_monitor_refcnt, 1,
> __ATOMIC_RELAXED) - 1) &&
> + if (!(rte_atomic_fetch_sub_explicit(&ifpga_monitor_refcnt, 1,
> + rte_memory_order_relaxed) - 1) &&
> ifpga_monitor_start_thread.opaque_id != 0) {
> ret =
> pthread_cancel((pthread_t)ifpga_monitor_start_thread.opaque_id);
> if (ret)
> --
> 1.8.3.1
Reviewed-by: Rosen Xu <rosen.xu@intel.com>
^ permalink raw reply [flat|nested] 300+ messages in thread
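A sketch of the monitor-thread lifecycle behind the ifpga hunks above: the caller that raises the refcount from zero starts the control thread, and the one that drops it back to zero cancels it. Names, error handling, and the exact headers are assumptions made for illustration:

#include <pthread.h>
#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_thread.h>

static RTE_ATOMIC(int) demo_monitor_refcnt;
static rte_thread_t demo_monitor_thread;

/* Monitor body: keep polling while at least one device is registered.
 * The sleep/poll interval is elided here. */
static uint32_t
demo_monitor_fn(void *arg)
{
	(void)arg;
	while (rte_atomic_load_explicit(&demo_monitor_refcnt,
			rte_memory_order_relaxed) != 0) {
		/* ... check device state here ... */
	}
	return 0;
}

static int
demo_monitor_register(void)
{
	if (rte_atomic_fetch_add_explicit(&demo_monitor_refcnt, 1,
			rte_memory_order_relaxed) == 0)
		return rte_thread_create_internal_control(&demo_monitor_thread,
				"demo-mon", demo_monitor_fn, NULL);
	return 0;
}

static void
demo_monitor_unregister(void)
{
	if (rte_atomic_fetch_sub_explicit(&demo_monitor_refcnt, 1,
			rte_memory_order_relaxed) == 1 &&
			demo_monitor_thread.opaque_id != 0)
		pthread_cancel((pthread_t)demo_monitor_thread.opaque_id);
}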
* [PATCH v5 23/45] event/opdl: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (21 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 22/45] raw/ifpga: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 24/45] event/octeontx: " Tyler Retzlaff
` (22 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/opdl/opdl_ring.c | 80 +++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index e87ffd5..3476f6b 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -47,12 +47,12 @@ struct __rte_cache_aligned shared_state {
/* Last known minimum sequence number of dependencies, used for multi
* thread operation
*/
- uint32_t available_seq;
+ RTE_ATOMIC(uint32_t) available_seq;
char _pad1[RTE_CACHE_LINE_SIZE * 3];
- uint32_t head; /* Head sequence number (for multi thread operation) */
+ RTE_ATOMIC(uint32_t) head; /* Head sequence number (for multi thread operation) */
char _pad2[RTE_CACHE_LINE_SIZE * 3];
struct opdl_stage *stage; /* back pointer */
- uint32_t tail; /* Tail sequence number */
+ RTE_ATOMIC(uint32_t) tail; /* Tail sequence number */
char _pad3[RTE_CACHE_LINE_SIZE * 2];
};
@@ -149,10 +149,10 @@ struct opdl_ring {
available(const struct opdl_stage *s)
{
if (s->threadsafe == true) {
- uint32_t n = __atomic_load_n(&s->shared.available_seq,
- __ATOMIC_ACQUIRE) -
- __atomic_load_n(&s->shared.head,
- __ATOMIC_ACQUIRE);
+ uint32_t n = rte_atomic_load_explicit(&s->shared.available_seq,
+ rte_memory_order_acquire) -
+ rte_atomic_load_explicit(&s->shared.head,
+ rte_memory_order_acquire);
/* Return 0 if available_seq needs to be updated */
return (n <= s->num_slots) ? n : 0;
@@ -168,7 +168,7 @@ struct opdl_ring {
{
uint32_t i;
uint32_t this_tail = s->shared.tail;
- uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+ uint32_t min_seq = rte_atomic_load_explicit(&s->deps[0]->tail, rte_memory_order_acquire);
/* Input stage sequence numbers are greater than the sequence numbers of
* its dependencies so an offset of t->num_slots is needed when
* calculating available slots and also the condition which is used to
@@ -179,16 +179,16 @@ struct opdl_ring {
if (is_input_stage(s)) {
wrap = s->num_slots;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((this_tail - seq) > (this_tail - min_seq))
min_seq = seq;
}
} else {
wrap = 0;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((seq - this_tail) < (min_seq - this_tail))
min_seq = seq;
}
@@ -197,8 +197,8 @@ struct opdl_ring {
if (s->threadsafe == false)
s->available_seq = min_seq + wrap;
else
- __atomic_store_n(&s->shared.available_seq, min_seq + wrap,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.available_seq, min_seq + wrap,
+ rte_memory_order_release);
}
/* Wait until the number of available slots reaches number requested */
@@ -298,7 +298,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -381,18 +381,18 @@ struct opdl_ring {
/* There should be no race condition here. If shared.tail
* matches, no other core can update it until this one does.
*/
- if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&s->shared.tail, rte_memory_order_acquire) ==
tail) {
if (num_entries >= (head - tail)) {
claim_mgr_remove(disclaims);
- __atomic_store_n(&s->shared.tail, head,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, head,
+ rte_memory_order_release);
num_entries -= (head - tail);
} else {
claim_mgr_move_tail(disclaims, num_entries);
- __atomic_store_n(&s->shared.tail,
+ rte_atomic_store_explicit(&s->shared.tail,
num_entries + tail,
- __ATOMIC_RELEASE);
+ rte_memory_order_release);
num_entries = 0;
}
} else if (block == false)
@@ -420,7 +420,7 @@ struct opdl_ring {
opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
false);
- *old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+ *old_head = rte_atomic_load_explicit(&s->shared.head, rte_memory_order_acquire);
while (true) {
bool success;
/* If called by opdl_ring_input(), claim does not need to be
@@ -440,11 +440,10 @@ struct opdl_ring {
if (*num_entries == 0)
return;
- success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+ success = rte_atomic_compare_exchange_weak_explicit(&s->shared.head, old_head,
*old_head + *num_entries,
- true, /* may fail spuriously */
- __ATOMIC_RELEASE, /* memory order on success */
- __ATOMIC_ACQUIRE); /* memory order on fail */
+ rte_memory_order_release, /* memory order on success */
+ rte_memory_order_acquire); /* memory order on fail */
if (likely(success))
break;
rte_pause();
@@ -472,10 +471,11 @@ struct opdl_ring {
/* If another thread started inputting before this one, but hasn't
* finished, we need to wait for it to complete to update the tail.
*/
- rte_wait_until_equal_32(&s->shared.tail, old_head, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&s->shared.tail, old_head,
+ rte_memory_order_acquire);
- __atomic_store_n(&s->shared.tail, old_head + num_entries,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, old_head + num_entries,
+ rte_memory_order_release);
return num_entries;
}
@@ -525,8 +525,8 @@ struct opdl_ring {
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- event = __atomic_load_n(&(ev->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -627,8 +627,8 @@ struct opdl_ring {
num_entries, s->head - old_tail);
num_entries = s->head - old_tail;
}
- __atomic_store_n(&s->shared.tail, num_entries + old_tail,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, num_entries + old_tail,
+ rte_memory_order_release);
}
uint32_t
@@ -657,7 +657,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
@@ -676,7 +676,7 @@ struct opdl_ring {
copy_entries_out(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -755,7 +755,7 @@ struct opdl_ring {
return 0;
}
if (s->threadsafe == false) {
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
s->seq += s->num_claimed;
s->shadow_head = s->head;
s->num_claimed = 0;
@@ -1008,8 +1008,8 @@ struct opdl_ring *
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- event = __atomic_load_n(&(ev_orig->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev_orig->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -1026,9 +1026,9 @@ struct opdl_ring *
if ((event & OPDL_EVENT_MASK) !=
ev_temp) {
- __atomic_store_n(&(ev_orig->event),
- ev_update,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(
+ (uint64_t __rte_atomic *)&ev_orig->event,
+ ev_update, rte_memory_order_release);
ev_updated = true;
}
if (ev_orig->u64 != ev->u64) {
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
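A condensed sketch of the multi-producer claim/publish scheme converted above: a weak compare-and-exchange advances the shared head to reserve slots (spurious failures simply retry), and the tail is published with a release store once earlier claimers have published theirs. Names are placeholders; the cast in the wait call mirrors the one in the patch:

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

struct demo_shared {
	RTE_ATOMIC(uint32_t) head;
	RTE_ATOMIC(uint32_t) tail;
};

/* Reserve num slots; returns the first reserved sequence number. */
static inline uint32_t
demo_claim(struct demo_shared *s, uint32_t num)
{
	uint32_t old_head = rte_atomic_load_explicit(&s->head,
			rte_memory_order_acquire);

	while (!rte_atomic_compare_exchange_weak_explicit(&s->head,
			&old_head, old_head + num,
			rte_memory_order_release,	/* on success */
			rte_memory_order_acquire))	/* on failure */
		rte_pause();

	return old_head;
}

/* Publish num slots starting at old_head, in claim order. */
static inline void
demo_publish(struct demo_shared *s, uint32_t old_head, uint32_t num)
{
	rte_wait_until_equal_32((uint32_t *)(uintptr_t)&s->tail, old_head,
			rte_memory_order_acquire);
	rte_atomic_store_explicit(&s->tail, old_head + num,
			rte_memory_order_release);
}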
* [PATCH v5 24/45] event/octeontx: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (22 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 23/45] event/opdl: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 25/45] event/dsw: " Tyler Retzlaff
` (21 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/octeontx/timvf_evdev.h | 8 ++++----
drivers/event/octeontx/timvf_worker.h | 36 +++++++++++++++++------------------
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index e7a63e4..3a2dc47 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -126,15 +126,15 @@ enum timvf_clk_src {
struct __rte_aligned(8) tim_mem_bucket {
uint64_t first_chunk;
union {
- uint64_t w1;
+ RTE_ATOMIC(uint64_t) w1;
struct {
- uint32_t nb_entry;
+ RTE_ATOMIC(uint32_t) nb_entry;
uint8_t sbt:1;
uint8_t hbt:1;
uint8_t bsk:1;
uint8_t rsvd:5;
- uint8_t lock;
- int16_t chunk_remainder;
+ RTE_ATOMIC(uint8_t) lock;
+ RTE_ATOMIC(int16_t) chunk_remainder;
};
};
uint64_t current_chunk;
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index e4b923e..de9f1b0 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -19,22 +19,22 @@
static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
- return __atomic_load_n(&bktp->chunk_remainder,
- __ATOMIC_ACQUIRE);
+ return rte_atomic_load_explicit(&bktp->chunk_remainder,
+ rte_memory_order_acquire);
}
static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_store_n(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_fetch_sub(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline uint8_t
@@ -47,14 +47,14 @@
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
- return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_or_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint8_t
@@ -81,34 +81,34 @@
{
/*Clear everything except lock. */
const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
- __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
- __ATOMIC_RELAXED);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
+ rte_memory_order_relaxed);
}
static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit(&bktp->lock, 0xff, rte_memory_order_acq_rel);
}
static inline uint32_t
@@ -121,13 +121,13 @@
static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
}
static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
- __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
}
static inline uint64_t
@@ -135,7 +135,7 @@
{
const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
TIM_BUCKET_W1_S_NUM_ENTRIES);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v;
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
}
static inline struct tim_mem_entry *
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
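A small sketch of the flag handling in timvf_worker.h above: set and clear a flag packed into the shared 64-bit bucket word with atomic OR/AND, getting the previous value back. The bit position is a placeholder:

#include <stdint.h>
#include <rte_stdatomic.h>

#define DEMO_FLAG_SBT (UINT64_C(1) << 32)	/* illustrative bit only */

/* Set the flag; returns the word as it was before the update. */
static inline uint64_t
demo_set_sbt(RTE_ATOMIC(uint64_t) *w1)
{
	return rte_atomic_fetch_or_explicit(w1, DEMO_FLAG_SBT,
			rte_memory_order_acq_rel);
}

/* Clear the flag; returns the word as it was before the update. */
static inline uint64_t
demo_clr_sbt(RTE_ATOMIC(uint64_t) *w1)
{
	return rte_atomic_fetch_and_explicit(w1, ~DEMO_FLAG_SBT,
			rte_memory_order_acq_rel);
}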
* [PATCH v5 25/45] event/dsw: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (23 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 24/45] event/octeontx: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 26/45] dma/skeleton: " Tyler Retzlaff
` (20 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
drivers/event/dsw/dsw_evdev.h | 6 +++---
drivers/event/dsw/dsw_event.c | 47 +++++++++++++++++++++++++++---------------
drivers/event/dsw/dsw_xstats.c | 4 ++--
3 files changed, 35 insertions(+), 22 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 3a5989f..2018306 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -227,9 +227,9 @@ struct __rte_cache_aligned dsw_port {
alignas(RTE_CACHE_LINE_SIZE) struct rte_ring *ctl_in_ring;
/* Estimate of current port load. */
- alignas(RTE_CACHE_LINE_SIZE) int16_t load;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int16_t) load;
/* Estimate of flows currently migrating to this port. */
- alignas(RTE_CACHE_LINE_SIZE) int32_t immigration_load;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int32_t) immigration_load;
};
struct dsw_queue {
@@ -252,7 +252,7 @@ struct dsw_evdev {
uint8_t num_queues;
int32_t max_inflight;
- alignas(RTE_CACHE_LINE_SIZE) int32_t credits_on_loan;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int32_t) credits_on_loan;
};
#define DSW_CTL_PAUS_REQ (0)
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 23488d9..70c3c3a 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -33,7 +33,8 @@
}
total_on_loan =
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed);
available = dsw->max_inflight - total_on_loan;
acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
@@ -45,13 +46,16 @@
* allocation.
*/
new_total_on_loan =
- __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED) + acquired_credits;
+ rte_atomic_fetch_add_explicit(&dsw->credits_on_loan,
+ acquired_credits,
+ rte_memory_order_relaxed) +
+ acquired_credits;
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
- __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan,
+ acquired_credits,
+ rte_memory_order_relaxed);
return false;
}
@@ -77,8 +81,9 @@
port->inflight_credits = leave_credits;
- __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan,
+ return_credits,
+ rte_memory_order_relaxed);
DSW_LOG_DP_PORT(DEBUG, port->id,
"Returned %d tokens to pool.\n",
@@ -156,19 +161,22 @@
int16_t period_load;
int16_t new_load;
- old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
+ old_load = rte_atomic_load_explicit(&port->load,
+ rte_memory_order_relaxed);
period_load = dsw_port_load_close_period(port, now);
new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
(DSW_OLD_LOAD_WEIGHT+1);
- __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->load, new_load,
+ rte_memory_order_relaxed);
/* The load of the recently immigrated flows should hopefully
* be reflected the load estimate by now.
*/
- __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->immigration_load, 0,
+ rte_memory_order_relaxed);
}
static void
@@ -390,10 +398,11 @@ struct dsw_queue_flow_burst {
for (i = 0; i < dsw->num_ports; i++) {
int16_t measured_load =
- __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].load,
+ rte_memory_order_relaxed);
int32_t immigration_load =
- __atomic_load_n(&dsw->ports[i].immigration_load,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
+ rte_memory_order_relaxed);
int32_t load = measured_load + immigration_load;
load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -523,8 +532,10 @@ struct dsw_queue_flow_burst {
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
- __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
- candidate_flow_load, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ &dsw->ports[candidate_port_id].immigration_load,
+ candidate_flow_load,
+ rte_memory_order_relaxed);
return true;
}
@@ -882,7 +893,8 @@ struct dsw_queue_flow_burst {
}
source_port_load =
- __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&source_port->load,
+ rte_memory_order_relaxed);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Load %d is below threshold level %d.\n",
@@ -1301,7 +1313,8 @@ struct dsw_queue_flow_burst {
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
+ rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed) >
source_port->new_event_threshold))
return 0;
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index 2a83a28..f61dfd8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -48,7 +48,7 @@ struct dsw_xstats_port {
static uint64_t
dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
{
- return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
}
static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ struct dsw_xstats_port {
{
int16_t load;
- load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
+ load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
return DSW_LOAD_TO_PERCENT(load);
}
--
1.8.3.1
* [PATCH v5 26/45] dma/skeleton: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (24 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 25/45] event/dsw: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 27/45] crypto/octeontx: " Tyler Retzlaff
` (19 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
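For reference, a minimal sketch of the before/after pattern applied here, using the
release/acquire pairing from this driver; the variable and helper names below are
illustrative, not part of the skeleton dmadev itself:

#include <stdint.h>
#include <rte_stdatomic.h>

/* was: plain uint64_t updated with the __atomic_* builtins */
static RTE_ATOMIC(uint64_t) completed;

static inline void
mark_one_completed(void)
{
	/* was: __atomic_fetch_add(&completed, 1, __ATOMIC_RELEASE); */
	rte_atomic_fetch_add_explicit(&completed, 1, rte_memory_order_release);
}

static inline int
all_done(uint64_t submitted)
{
	/* was: __atomic_load_n(&completed, __ATOMIC_ACQUIRE) */
	return submitted == rte_atomic_load_explicit(&completed,
			rte_memory_order_acquire);
}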
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/dma/skeleton/skeleton_dmadev.c | 5 +++--
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index 48f88f9..926c188 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -142,7 +142,7 @@
else if (desc->op == SKELDMA_OP_FILL)
do_fill(desc);
- __atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&hw->completed_count, 1, rte_memory_order_release);
(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
}
@@ -335,7 +335,8 @@
RTE_SET_USED(vchan);
*status = RTE_DMA_VCHAN_IDLE;
- if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE)
+ if (hw->submitted_count != rte_atomic_load_explicit(&hw->completed_count,
+ rte_memory_order_acquire)
|| hw->zero_req_count == 0)
*status = RTE_DMA_VCHAN_ACTIVE;
return 0;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index cfd37d1..0365f64 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -81,7 +81,7 @@ struct skeldma_hw {
/* Cache delimiter for cpuwork thread's operation data */
alignas(RTE_CACHE_LINE_SIZE) char cache2;
volatile uint32_t zero_req_count;
- uint64_t completed_count;
+ RTE_ATOMIC(uint64_t) completed_count;
};
#endif /* SKELETON_DMADEV_H */
--
1.8.3.1
* [PATCH v5 27/45] crypto/octeontx: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (25 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 26/45] dma/skeleton: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 28/45] common/mlx5: " Tyler Retzlaff
` (18 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
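Here only the standalone fences change; a minimal sketch of the mapping (the wrapper
function is illustrative, not part of the driver):

#include <rte_atomic.h>
#include <rte_stdatomic.h>

static inline void
fence_examples(void)
{
	/* was: rte_atomic_thread_fence(__ATOMIC_RELEASE); */
	rte_atomic_thread_fence(rte_memory_order_release);
	/* was: rte_atomic_thread_fence(__ATOMIC_ACQUIRE); */
	rte_atomic_thread_fence(rte_memory_order_acquire);
}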
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 947e1be..bafd0c1 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -652,7 +652,7 @@
if (!rsp_info->sched_type)
ssows_head_wait(ws);
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
}
@@ -896,7 +896,7 @@
pcount = pending_queue_level(pqueue, DEFAULT_CMD_QLEN);
/* Ensure pcount isn't read before data lands */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
count = (nb_ops > pcount) ? pcount : nb_ops;
--
1.8.3.1
* [PATCH v5 28/45] common/mlx5: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (26 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 27/45] crypto/octeontx: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 29/45] common/idpf: " Tyler Retzlaff
` (17 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
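Note that besides the call sites, objects accessed through the new API are annotated
with RTE_ATOMIC(); a minimal sketch of the netlink sequence-number idiom converted
below (names are illustrative):

#include <stdint.h>
#include <rte_stdatomic.h>

/* was: uint32_t, bumped with __atomic_fetch_add(..., __ATOMIC_RELAXED) */
static RTE_ATOMIC(uint32_t) sn;

static inline uint32_t
next_sn(void)
{
	return rte_atomic_fetch_add_explicit(&sn, 1,
			rte_memory_order_relaxed) + 1;
}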
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/mlx5/linux/mlx5_nl.c | 5 +--
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 ++++-----
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 +++++++++---------
drivers/common/mlx5/mlx5_common_utils.h | 6 ++--
drivers/common/mlx5/mlx5_malloc.c | 58 ++++++++++++++++-----------------
7 files changed, 61 insertions(+), 60 deletions(-)
diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 61192eb..a5ac4dc 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -175,10 +175,11 @@ struct mlx5_nl_port_info {
uint16_t state; /**< IB device port state (out). */
};
-uint32_t atomic_sn;
+RTE_ATOMIC(uint32_t) atomic_sn;
/* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
+#define MLX5_NL_SN_GENERATE (rte_atomic_fetch_add_explicit(&atomic_sn, 1, \
+ rte_memory_order_relaxed) + 1)
/**
* Opens a Netlink socket.
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 9c80277..14c70ed 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -195,7 +195,7 @@ enum mlx5_cqe_status {
/* Prevent speculative reading of other fields in CQE until
* CQE is valid.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
op_code == MLX5_CQE_REQ_ERR))
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 85ec10d..50922ad 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -35,7 +35,7 @@ struct mlx5_range {
/** Memory region for a mempool. */
struct mlx5_mempool_mr {
struct mlx5_pmd_mr pmd_mr;
- uint32_t refcnt; /**< Number of mempools sharing this MR. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
};
/* Mempool registration. */
@@ -56,11 +56,11 @@ struct mlx5_mempool_reg {
{
struct mlx5_mprq_buf *buf = opaque;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
rte_mempool_put(buf->mp, buf);
- } else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0)) {
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ } else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0)) {
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
rte_mempool_put(buf->mp, buf);
}
}
@@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {
unsigned int i;
for (i = 0; i < mpr->mrs_n; i++)
- __atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {
bool ret = false;
for (i = 0; i < mpr->mrs_n; i++)
- ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0;
+ ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0;
return ret;
}
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index aa10b68..a7f1042 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {
/* Multi-Packet RQ buffer header. */
struct __rte_cache_aligned mlx5_mprq_buf {
struct rte_mempool *mp;
- uint16_t refcnt; /* Atomically accessed refcnt. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
struct rte_mbuf_ext_shared_info shinfos[];
/*
* Shared information per stride.
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index e69d068..4b95d35 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -81,14 +81,14 @@ struct mlx5_list *
while (entry != NULL) {
if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
if (reuse) {
- ret = __atomic_fetch_add(&entry->ref_cnt, 1,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
+ rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
l_const->name, (void *)entry,
entry->ref_cnt);
} else if (lcore_index < MLX5_LIST_GLOBAL) {
- ret = __atomic_load_n(&entry->ref_cnt,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_load_explicit(&entry->ref_cnt,
+ rte_memory_order_relaxed);
}
if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
return entry;
@@ -151,13 +151,13 @@ struct mlx5_list_entry *
{
struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
- uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
- __ATOMIC_RELAXED);
+ uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
+ rte_memory_order_relaxed);
while (inv_cnt != 0 && entry != NULL) {
struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
- if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
LIST_REMOVE(entry, next);
if (l_const->lcores_share)
l_const->cb_clone_free(l_const->ctx, entry);
@@ -217,7 +217,7 @@ struct mlx5_list_entry *
entry->lcore_idx = (uint32_t)lcore_index;
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
l_const->name, lcore_index,
(void *)entry, entry->ref_cnt);
@@ -254,7 +254,7 @@ struct mlx5_list_entry *
l_inconst->gen_cnt++;
rte_rwlock_write_unlock(&l_inconst->lock);
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
(void *)entry, entry->ref_cnt);
return local_entry;
@@ -285,7 +285,7 @@ struct mlx5_list_entry *
{
struct mlx5_list_entry *gentry = entry->gentry;
- if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
if (entry->lcore_idx == (uint32_t)lcore_idx) {
LIST_REMOVE(entry, next);
@@ -294,23 +294,23 @@ struct mlx5_list_entry *
else
l_const->cb_remove(l_const->ctx, entry);
} else {
- __atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+ 1, rte_memory_order_relaxed);
}
if (!l_const->lcores_share) {
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)entry);
return 0;
}
- if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
rte_rwlock_write_lock(&l_inconst->lock);
if (likely(gentry->ref_cnt == 0)) {
LIST_REMOVE(gentry, next);
rte_rwlock_write_unlock(&l_inconst->lock);
l_const->cb_remove(l_const->ctx, gentry);
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)gentry);
return 0;
@@ -377,7 +377,7 @@ struct mlx5_list_entry *
mlx5_list_get_entry_num(struct mlx5_list *list)
{
MLX5_ASSERT(list);
- return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
}
/********************* Hash List **********************/
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index 44eba50..c5eff7a 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -29,7 +29,7 @@
*/
struct mlx5_list_entry {
LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
- alignas(8) uint32_t ref_cnt; /* 0 means, entry is invalid. */
+ alignas(8) RTE_ATOMIC(uint32_t) ref_cnt; /* 0 means, entry is invalid. */
uint32_t lcore_idx;
union {
struct mlx5_list_entry *gentry;
@@ -39,7 +39,7 @@ struct mlx5_list_entry {
struct __rte_cache_aligned mlx5_list_cache {
LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
- uint32_t inv_cnt; /* Invalid entries counter. */
+ RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
};
/**
@@ -111,7 +111,7 @@ struct mlx5_list_const {
struct mlx5_list_inconst {
rte_rwlock_t lock; /* read/write lock. */
volatile uint32_t gen_cnt; /* List modification may update it. */
- volatile uint32_t count; /* number of entries in list. */
+ volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
struct mlx5_list_cache *cache[MLX5_LIST_MAX];
/* Lcore cache, last index is the global cache. */
};
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index c58c41d..ef6dabe 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -16,7 +16,7 @@ struct mlx5_sys_mem {
uint32_t init:1; /* Memory allocator initialized. */
uint32_t enable:1; /* System memory select. */
uint32_t reserve:30; /* Reserve. */
- struct rte_memseg_list *last_msl;
+ RTE_ATOMIC(struct rte_memseg_list *) last_msl;
/* last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
uint64_t malloc_sys;
@@ -93,14 +93,14 @@ struct mlx5_sys_mem {
* different with the cached msl.
*/
if (addr && !mlx5_mem_check_msl(addr,
- (struct rte_memseg_list *)__atomic_load_n
- (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
- __atomic_store_n(&mlx5_sys_mem.last_msl,
+ (struct rte_memseg_list *)rte_atomic_load_explicit
+ (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
+ rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
rte_mem_virt2memseg_list(addr),
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
+ rte_memory_order_relaxed);
#endif
}
}
@@ -122,11 +122,11 @@ struct mlx5_sys_mem {
* to check if the memory belongs to rte memory.
*/
if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
- __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+ rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
if (!rte_mem_virt2memseg_list(addr))
return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
#endif
}
return true;
@@ -185,8 +185,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -199,8 +199,8 @@ struct mlx5_sys_mem {
addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -233,8 +233,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -246,8 +246,8 @@ struct mlx5_sys_mem {
new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -259,14 +259,14 @@ struct mlx5_sys_mem {
return;
if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
+ rte_memory_order_relaxed);
#endif
mlx5_os_free(addr);
} else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
+ rte_memory_order_relaxed);
#endif
rte_free(addr);
}
@@ -280,14 +280,14 @@ struct mlx5_sys_mem {
" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
" update:%"PRIi64"",
- __atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
#endif
}
--
1.8.3.1
* [PATCH v5 29/45] common/idpf: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (27 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 28/45] common/mlx5: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 30/45] common/iavf: " Tyler Retzlaff
` (16 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
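One detail in this conversion: the __atomic_compare_exchange builtin takes the desired
value by address, while rte_atomic_compare_exchange_strong_explicit takes it by value
(see the atomic_set_cmd() hunk below). A minimal sketch with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

static inline bool
try_set_cmd(RTE_ATOMIC(uint32_t) *pend_cmd, uint32_t ops)
{
	uint32_t expected = 0;	/* stand-in for VIRTCHNL2_OP_UNKNOWN */

	/*
	 * was: __atomic_compare_exchange(pend_cmd, &expected, &ops, 0,
	 *                                __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)
	 * the desired value ('ops') is now passed by value, not by address.
	 */
	return rte_atomic_compare_exchange_strong_explicit(pend_cmd, &expected,
			ops, rte_memory_order_acquire, rte_memory_order_acquire);
}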
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/idpf/idpf_common_device.h | 6 +++---
drivers/common/idpf/idpf_common_rxtx.c | 14 ++++++++------
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 ++++++++--------
4 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 3834c1f..bfa927a 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -48,7 +48,7 @@ struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
struct virtchnl2_get_capabilities caps;
- volatile uint32_t pend_cmd; /* pending command not finished */
+ volatile RTE_ATOMIC(uint32_t) pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from cp */
uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
@@ -179,8 +179,8 @@ struct idpf_cmd_info {
atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
{
uint32_t op_unk = VIRTCHNL2_OP_UNKNOWN;
- bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ bool ret = rte_atomic_compare_exchange_strong_explicit(&adapter->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 83b131e..b09c58c 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -592,8 +592,8 @@
next_avail = 0;
rx_bufq->nb_rx_hold -= delta;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
return;
@@ -612,8 +612,8 @@
next_avail += nb_refill;
rx_bufq->nb_rx_hold -= nb_refill;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
}
@@ -1093,7 +1093,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(nmb == NULL)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -1203,7 +1204,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index b49b1ed..eeeeed1 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -97,7 +97,7 @@
#define IDPF_RX_SPLIT_BUFQ2_ID 2
struct idpf_rx_stats {
- uint64_t mbuf_alloc_failed;
+ RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
struct idpf_rx_queue {
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index f65e8d5..3b5e124 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -38,8 +38,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -168,8 +168,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
@@ -564,8 +564,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
@@ -638,8 +638,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
--
1.8.3.1
* [PATCH v5 30/45] common/iavf: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (28 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 29/45] common/idpf: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 31/45] baseband/acc: " Tyler Retzlaff
` (15 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
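The iavf change is a relaxed counter used to build unique memzone names; a minimal
sketch of that idiom (the function and buffer handling are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

static inline void
format_unique_name(char *buf, size_t len)
{
	static RTE_ATOMIC(uint64_t) next_id;

	/* was: __atomic_fetch_add(&next_id, 1, __ATOMIC_RELAXED) */
	snprintf(buf, len, "iavf_dma_%" PRIu64,
		rte_atomic_fetch_add_explicit(&next_id, 1,
				rte_memory_order_relaxed));
}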
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/iavf/iavf_impl.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
index 8919b0e..c0ff301 100644
--- a/drivers/common/iavf/iavf_impl.c
+++ b/drivers/common/iavf/iavf_impl.c
@@ -18,7 +18,7 @@ enum iavf_status
u64 size,
u32 alignment)
{
- static uint64_t iavf_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) iavf_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -26,7 +26,7 @@ enum iavf_status
return IAVF_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "iavf_dma_%" PRIu64,
- __atomic_fetch_add(&iavf_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&iavf_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
--
1.8.3.1
* [PATCH v5 31/45] baseband/acc: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (29 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 30/45] common/iavf: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-13 18:12 ` Chautru, Nicolas
2024-05-06 17:58 ` [PATCH v5 32/45] net/txgbe: " Tyler Retzlaff
` (14 subsequent siblings)
45 siblings, 1 reply; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
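In these PMDs the ring descriptors stay plain structures, so the relaxed loads go
through a cast to an __rte_atomic-qualified pointer instead of retyping the ring; a
minimal sketch (the descriptor layout is an illustrative stand-in):

#include <stdint.h>
#include <rte_stdatomic.h>

union acc_like_desc {	/* illustrative stand-in for the ring descriptor */
	uint64_t atom_hdr;
};

static inline uint64_t
load_desc_hdr(union acc_like_desc *desc)
{
	/* was: __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED) */
	return rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
			rte_memory_order_relaxed);
}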
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++------------
2 files changed, 48 insertions(+), 34 deletions(-)
diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
index 4f666e5..ee50b9c 100644
--- a/drivers/baseband/acc/rte_acc100_pmd.c
+++ b/drivers/baseband/acc/rte_acc100_pmd.c
@@ -3673,8 +3673,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3728,8 +3728,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3742,8 +3742,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3755,8 +3755,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n",
desc, rsp.val, descs_in_tb, desc->req.numCBs);
@@ -3793,8 +3793,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3846,8 +3846,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3902,8 +3902,8 @@
uint8_t cbs_in_tb = 1, cb_idx = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3919,8 +3919,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3930,8 +3930,8 @@
/* Read remaining CBs if exists */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
desc, rsp.val, cb_idx, cbs_in_tb);
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 88b1104..f7c54be 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -3119,7 +3119,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
return -1;
@@ -3157,7 +3158,8 @@
struct rte_bbdev_enc_op *op;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3192,7 +3194,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + 1 > max_requested_ops)
return -1;
@@ -3208,7 +3211,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3220,7 +3224,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
@@ -3246,7 +3251,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3290,7 +3296,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3346,7 +3353,8 @@
uint32_t tb_crc_check = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3362,7 +3370,8 @@
/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.
* If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3372,7 +3381,8 @@
/* Read remaining CBs if exists. */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
rsp.val, desc->rsp.add_info_0,
@@ -3790,7 +3800,8 @@
struct rte_bbdev_fft_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4116,7 +4127,8 @@
uint8_t descs_in_op, i;
desc = acc_desc_tail(q, dequeued_ops);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4127,7 +4139,8 @@
/* Get last CB. */
last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1);
/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -4137,8 +4150,8 @@
for (i = 1; i < descs_in_op - 1; i++) {
last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
& q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit(
+ (uint64_t __rte_atomic *)last_desc, rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
}
@@ -4154,7 +4167,8 @@
for (i = 0; i < descs_in_op; i++) {
desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
--
1.8.3.1
* RE: [PATCH v5 31/45] baseband/acc: use rte stdatomic API
2024-05-06 17:58 ` [PATCH v5 31/45] baseband/acc: " Tyler Retzlaff
@ 2024-05-13 18:12 ` Chautru, Nicolas
2024-05-13 18:59 ` Morten Brørup
0 siblings, 1 reply; 300+ messages in thread
From: Chautru, Nicolas @ 2024-05-13 18:12 UTC (permalink / raw)
To: Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Morten Brørup, Sevincer, Abdullah,
Ajit Khaparde, Alok Prasad, Burakov, Anatoly, Andrew Rybchenko,
Anoob Joseph, Richardson, Bruce, Marohn, Byron, Chenbo Xia,
Chengwen Feng, Loftus, Ciara, Power, Ciara, Dariusz Sosnowski,
Hunt, David, Devendra Singh Rawat, Carrillo, Erik G,
Guoyang Zhou, Harman Kalra, Van Haaren, Harry, Nagarahalli,
Honnappa, Jakub Grajciar, Jerin Jacob, Jeroen de Borst,
Jian Wang, Jiawen Wu, Jie Hai, Wu, Jingjing, Joshua Washington,
Joyce Kong, Guo, Junfeng, Laatz, Kevin, Konstantin Ananyev,
Liang Ma, Long Li, Maciej Czekaj, Matan Azrad, Maxime Coquelin,
Ori Kam, Pavan Nikhilesh, Mccarthy, Peter, Rahul Lakkireddy,
Pattan, Reshma, Xu, Rosen, Ruifeng Wang, Rushil Gupta, Gobriel,
Sameh, Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Medvedkin, Vladimir,
Xiaoyun Wang, Wang, Yipeng1, Yisen Zhuang, Ziyang Xuan
Hi Tyler,
There are still some indentation issues where the sed conversion splits a statement across two lines.
Please fix the indentation; NACK as is.
Thanks
Nic
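For illustration, the kind of respin being asked for on the split statements is a
fragment like the one below, with the continuation line indented with two tabs
relative to the statement rather than with spaces; this is a sketch of the expected
style, not the actual revised patch:

	atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
			rte_memory_order_relaxed);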
> -----Original Message-----
> From: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Sent: Monday, May 6, 2024 10:58 AM
> To: dev@dpdk.org
> Subject: [PATCH v5 31/45] baseband/acc: use rte stdatomic API
>
> Replace the use of gcc builtin __atomic_xxx intrinsics with corresponding
> rte_atomic_xxx optional rte stdatomic API.
>
> Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
> drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
> drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++-----
> -------
> 2 files changed, 48 insertions(+), 34 deletions(-)
>
> diff --git a/drivers/baseband/acc/rte_acc100_pmd.c
> b/drivers/baseband/acc/rte_acc100_pmd.c
> index 4f666e5..ee50b9c 100644
> --- a/drivers/baseband/acc/rte_acc100_pmd.c
> +++ b/drivers/baseband/acc/rte_acc100_pmd.c
> @@ -3673,8 +3673,8 @@
>
> desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
> desc = q->ring_addr + desc_idx;
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3728,8 +3728,8 @@
> uint16_t current_dequeued_descs = 0, descs_in_tb;
>
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3742,8 +3742,8 @@
> /* Check if last CB in TB is ready to dequeue (and thus
> * the whole TB) - checking sdone bit. If not return.
> */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_SDONE))
> return -1;
>
> @@ -3755,8 +3755,8 @@
>
> while (i < descs_in_tb) {
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
> rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs
> %d\n",
> desc, rsp.val, descs_in_tb, desc-
> >req.numCBs); @@ -3793,8 +3793,8 @@
> struct rte_bbdev_dec_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3846,8 +3846,8 @@
> struct rte_bbdev_dec_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3902,8 +3902,8 @@
> uint8_t cbs_in_tb = 1, cb_idx = 0;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3919,8 +3919,8 @@
> /* Check if last CB in TB is ready to dequeue (and thus
> * the whole TB) - checking sdone bit. If not return.
> */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_SDONE))
> return -1;
>
> @@ -3930,8 +3930,8 @@
> /* Read remaining CBs if exists */
> while (cb_idx < cbs_in_tb) {
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
> rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
> desc, rsp.val, cb_idx,
> cbs_in_tb); diff --git a/drivers/baseband/acc/rte_vrb_pmd.c
> b/drivers/baseband/acc/rte_vrb_pmd.c
> index 88b1104..f7c54be 100644
> --- a/drivers/baseband/acc/rte_vrb_pmd.c
> +++ b/drivers/baseband/acc/rte_vrb_pmd.c
> @@ -3119,7 +3119,8 @@
>
> desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
> desc = q->ring_addr + desc_idx;
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
> return -1;
> @@ -3157,7 +3158,8 @@
> struct rte_bbdev_enc_op *op;
>
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3192,7 +3194,8 @@
> uint16_t current_dequeued_descs = 0, descs_in_tb;
>
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> if (*dequeued_ops + 1 > max_requested_ops)
> return -1;
> @@ -3208,7 +3211,8 @@
> /* Check if last CB in TB is ready to dequeue (and thus
> * the whole TB) - checking sdone bit. If not return.
> */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_SDONE))
> return -1;
>
> @@ -3220,7 +3224,8 @@
>
> while (i < descs_in_tb) {
> desc = acc_desc_tail(q, *dequeued_descs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
>
> vrb_update_dequeued_operation(desc, rsp, &op->status,
> aq_dequeued, true, false); @@ -3246,7 +3251,8 @@
> struct rte_bbdev_dec_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3290,7 +3296,8 @@
> struct rte_bbdev_dec_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3346,7 +3353,8 @@
> uint32_t tb_crc_check = 0;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -3362,7 +3370,8 @@
> /* Check if last CB in TB is ready to dequeue (and thus the whole TB) -
> checking sdone bit.
> * If not return.
> */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_SDONE))
> return -1;
>
> @@ -3372,7 +3381,8 @@
> /* Read remaining CBs if exists. */
> while (cb_idx < cbs_in_tb) {
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
> rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
> rsp.val, desc->rsp.add_info_0,
> @@ -3790,7 +3800,8 @@
> struct rte_bbdev_fft_op *op;
>
> desc = acc_desc_tail(q, dequeued_cbs);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -4116,7 +4127,8 @@
> uint8_t descs_in_op, i;
>
> desc = acc_desc_tail(q, dequeued_ops);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
>
> /* Check fdone bit. */
> if (!(atom_desc.rsp.val & ACC_FDONE))
> @@ -4127,7 +4139,8 @@
> /* Get last CB. */
> last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op -
> 1);
> /* Check if last op is ready to dequeue by checking fdone bit.
> If not exit. */
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t
> *)last_desc, __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)last_desc,
> + rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_FDONE))
> return -1;
> #ifdef RTE_LIBRTE_BBDEV_DEBUG
> @@ -4137,8 +4150,8 @@
> for (i = 1; i < descs_in_op - 1; i++) {
> last_desc = q->ring_addr + ((q->sw_ring_tail +
> dequeued_ops + i)
> & q->sw_ring_wrap_mask);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t
> *)last_desc,
> - __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit(
> + (uint64_t __rte_atomic *)last_desc,
> rte_memory_order_relaxed);
> if (!(atom_desc.rsp.val & ACC_FDONE))
> return -1;
> }
> @@ -4154,7 +4167,8 @@
>
> for (i = 0; i < descs_in_op; i++) {
> desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
> & q->sw_ring_wrap_mask);
> - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> __ATOMIC_RELAXED);
> + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> __rte_atomic *)desc,
> + rte_memory_order_relaxed);
> rsp.val = atom_desc.rsp.val;
>
> vrb_update_dequeued_operation(desc, rsp, &op->status,
> aq_dequeued, true, false);
> --
> 1.8.3.1
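The hunks quoted above all follow the same shape: a descriptor word that the hardware writes in place is read with a relaxed atomic load and its "done" bit is tested. As a minimal, self-contained sketch of that shape with the rte stdatomic API (FDONE_BIT and the helper name are placeholders, not the driver's ACC definitions):

#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

#define FDONE_BIT (1ULL << 63) /* placeholder, stands in for ACC_FDONE */

/* Poll a descriptor header word that hardware updates in place. */
static bool
desc_is_done(uint64_t *desc_hdr)
{
	/* was: __atomic_load_n(desc_hdr, __ATOMIC_RELAXED) */
	uint64_t hdr = rte_atomic_load_explicit(
		(uint64_t __rte_atomic *)desc_hdr, rte_memory_order_relaxed);

	return (hdr & FDONE_BIT) != 0;
}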
^ permalink raw reply [flat|nested] 300+ messages in thread
* RE: [PATCH v5 31/45] baseband/acc: use rte stdatomic API
2024-05-13 18:12 ` Chautru, Nicolas
@ 2024-05-13 18:59 ` Morten Brørup
2024-05-14 7:31 ` David Marchand
0 siblings, 1 reply; 300+ messages in thread
From: Morten Brørup @ 2024-05-13 18:59 UTC (permalink / raw)
To: Chautru, Nicolas, Tyler Retzlaff, dev
Cc: Mattias Rönnblom, Sevincer, Abdullah, Ajit Khaparde,
Alok Prasad, Burakov, Anatoly, Andrew Rybchenko, Anoob Joseph,
Richardson, Bruce, Marohn, Byron, Chenbo Xia, Chengwen Feng,
Loftus, Ciara, Power, Ciara, Dariusz Sosnowski, Hunt, David,
Devendra Singh Rawat, Carrillo, Erik G, Guoyang Zhou,
Harman Kalra, Van Haaren, Harry, Nagarahalli, Honnappa,
Jakub Grajciar, Jerin Jacob, Jeroen de Borst, Jian Wang,
Jiawen Wu, Jie Hai, Wu, Jingjing, Joshua Washington, Joyce Kong,
Guo, Junfeng, Laatz, Kevin, Konstantin Ananyev, Liang Ma,
Long Li, Maciej Czekaj, Matan Azrad, Maxime Coquelin, Ori Kam,
Pavan Nikhilesh, Mccarthy, Peter, Rahul Lakkireddy, Pattan,
Reshma, Xu, Rosen, Ruifeng Wang, Rushil Gupta, Gobriel, Sameh,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Medvedkin, Vladimir,
Xiaoyun Wang, Wang, Yipeng1, Yisen Zhuang, Ziyang Xuan
Tyler, it looks like the line continuations in /drivers/baseband/acc/rte_vrb_pmd.c are indented with four spaces instead of double <TAB>.
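For illustration only (an invented helper, not a line taken from rte_vrb_pmd.c, and note that tab characters may be rendered as spaces by the archive), the two indentation styles being discussed look like this:

#include <stdint.h>

#include <rte_stdatomic.h>

/* Continuation indented with four spaces (the style flagged above): */
static uint64_t
load_hdr_spaces(uint64_t *desc)
{
    return rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
        rte_memory_order_relaxed);
}

/* Continuation indented with two tabs, per the usual DPDK convention: */
static uint64_t
load_hdr_tabs(uint64_t *desc)
{
	return rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
			rte_memory_order_relaxed);
}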
Med venlig hilsen / Kind regards,
-Morten Brørup
> From: Chautru, Nicolas [mailto:nicolas.chautru@intel.com]
> Sent: Monday, 13 May 2024 20.13
>
> Hi Tyler,
>
> Still some issues with indentation where the sed causes a split across
> 2 lines.
> Please fix the indentation; nack as is.
>
> Thanks
> Nic
>
> > From: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > Sent: Monday, May 6, 2024 10:58 AM
> >
> > Replace the use of gcc builtin __atomic_xxx intrinsics with
> corresponding
> > rte_atomic_xxx optional rte stdatomic API.
> >
> > Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
> > Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> > ---
> > drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
> > drivers/baseband/acc/rte_vrb_pmd.c | 46 +++++++++++++++++++++++----------
> > 2 files changed, 48 insertions(+), 34 deletions(-)
> >
> > diff --git a/drivers/baseband/acc/rte_acc100_pmd.c
> > b/drivers/baseband/acc/rte_acc100_pmd.c
> > index 4f666e5..ee50b9c 100644
> > --- a/drivers/baseband/acc/rte_acc100_pmd.c
> > +++ b/drivers/baseband/acc/rte_acc100_pmd.c
> > @@ -3673,8 +3673,8 @@
> >
> > desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
> > desc = q->ring_addr + desc_idx;
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3728,8 +3728,8 @@
> > uint16_t current_dequeued_descs = 0, descs_in_tb;
> >
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3742,8 +3742,8 @@
> > /* Check if last CB in TB is ready to dequeue (and thus
> > * the whole TB) - checking sdone bit. If not return.
> > */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_SDONE))
> > return -1;
> >
> > @@ -3755,8 +3755,8 @@
> >
> > while (i < descs_in_tb) {
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> > rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs
> > %d\n",
> > desc, rsp.val, descs_in_tb, desc-
> > >req.numCBs); @@ -3793,8 +3793,8 @@
> > struct rte_bbdev_dec_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3846,8 +3846,8 @@
> > struct rte_bbdev_dec_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3902,8 +3902,8 @@
> > uint8_t cbs_in_tb = 1, cb_idx = 0;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3919,8 +3919,8 @@
> > /* Check if last CB in TB is ready to dequeue (and thus
> > * the whole TB) - checking sdone bit. If not return.
> > */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_SDONE))
> > return -1;
> >
> > @@ -3930,8 +3930,8 @@
> > /* Read remaining CBs if exists */
> > while (cb_idx < cbs_in_tb) {
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> > rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
> > desc, rsp.val, cb_idx,
> > cbs_in_tb); diff --git a/drivers/baseband/acc/rte_vrb_pmd.c
> > b/drivers/baseband/acc/rte_vrb_pmd.c
> > index 88b1104..f7c54be 100644
> > --- a/drivers/baseband/acc/rte_vrb_pmd.c
> > +++ b/drivers/baseband/acc/rte_vrb_pmd.c
> > @@ -3119,7 +3119,8 @@
> >
> > desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
> > desc = q->ring_addr + desc_idx;
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
> > return -1;
> > @@ -3157,7 +3158,8 @@
> > struct rte_bbdev_enc_op *op;
> >
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3192,7 +3194,8 @@
> > uint16_t current_dequeued_descs = 0, descs_in_tb;
> >
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > if (*dequeued_ops + 1 > max_requested_ops)
> > return -1;
> > @@ -3208,7 +3211,8 @@
> > /* Check if last CB in TB is ready to dequeue (and thus
> > * the whole TB) - checking sdone bit. If not return.
> > */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_SDONE))
> > return -1;
> >
> > @@ -3220,7 +3224,8 @@
> >
> > while (i < descs_in_tb) {
> > desc = acc_desc_tail(q, *dequeued_descs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> >
> > vrb_update_dequeued_operation(desc, rsp, &op->status,
> > aq_dequeued, true, false); @@ -3246,7 +3251,8 @@
> > struct rte_bbdev_dec_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3290,7 +3296,8 @@
> > struct rte_bbdev_dec_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3346,7 +3353,8 @@
> > uint32_t tb_crc_check = 0;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -3362,7 +3370,8 @@
> > /* Check if last CB in TB is ready to dequeue (and thus the whole
> TB) -
> > checking sdone bit.
> > * If not return.
> > */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_SDONE))
> > return -1;
> >
> > @@ -3372,7 +3381,8 @@
> > /* Read remaining CBs if exists. */
> > while (cb_idx < cbs_in_tb) {
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> > rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
> > rsp.val, desc->rsp.add_info_0,
> > @@ -3790,7 +3800,8 @@
> > struct rte_bbdev_fft_op *op;
> >
> > desc = acc_desc_tail(q, dequeued_cbs);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -4116,7 +4127,8 @@
> > uint8_t descs_in_op, i;
> >
> > desc = acc_desc_tail(q, dequeued_ops);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> >
> > /* Check fdone bit. */
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > @@ -4127,7 +4139,8 @@
> > /* Get last CB. */
> > last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op -
> > 1);
> > /* Check if last op is ready to dequeue by checking fdone
> bit.
> > If not exit. */
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t
> > *)last_desc, __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)last_desc,
> > + rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > return -1;
> > #ifdef RTE_LIBRTE_BBDEV_DEBUG
> > @@ -4137,8 +4150,8 @@
> > for (i = 1; i < descs_in_op - 1; i++) {
> > last_desc = q->ring_addr + ((q->sw_ring_tail +
> > dequeued_ops + i)
> > & q->sw_ring_wrap_mask);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t
> > *)last_desc,
> > - __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit(
> > + (uint64_t __rte_atomic *)last_desc,
> > rte_memory_order_relaxed);
> > if (!(atom_desc.rsp.val & ACC_FDONE))
> > return -1;
> > }
> > @@ -4154,7 +4167,8 @@
> >
> > for (i = 0; i < descs_in_op; i++) {
> > desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
> > & q->sw_ring_wrap_mask);
> > - atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
> > __ATOMIC_RELAXED);
> > + atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t
> > __rte_atomic *)desc,
> > + rte_memory_order_relaxed);
> > rsp.val = atom_desc.rsp.val;
> >
> > vrb_update_dequeued_operation(desc, rsp, &op->status,
> > aq_dequeued, true, false);
> > --
> > 1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* Re: [PATCH v5 31/45] baseband/acc: use rte stdatomic API
2024-05-13 18:59 ` Morten Brørup
@ 2024-05-14 7:31 ` David Marchand
2024-05-14 16:03 ` Tyler Retzlaff
0 siblings, 1 reply; 300+ messages in thread
From: David Marchand @ 2024-05-14 7:31 UTC (permalink / raw)
To: Morten Brørup, Tyler Retzlaff
Cc: Chautru, Nicolas, dev, Mattias Rönnblom, Sevincer, Abdullah,
Ajit Khaparde, Alok Prasad, Burakov, Anatoly, Andrew Rybchenko,
Anoob Joseph, Richardson, Bruce, Marohn, Byron, Chenbo Xia,
Chengwen Feng, Loftus, Ciara, Power, Ciara, Dariusz Sosnowski,
Hunt, David, Devendra Singh Rawat, Carrillo, Erik G,
Guoyang Zhou, Harman Kalra, Van Haaren, Harry, Nagarahalli,
Honnappa, Jakub Grajciar, Jerin Jacob, Jeroen de Borst,
Jian Wang, Jiawen Wu, Jie Hai, Wu, Jingjing, Joshua Washington,
Joyce Kong, Guo, Junfeng, Laatz, Kevin, Konstantin Ananyev,
Liang Ma, Long Li, Maciej Czekaj, Matan Azrad, Maxime Coquelin,
Ori Kam, Pavan Nikhilesh, Mccarthy, Peter, Rahul Lakkireddy,
Pattan, Reshma, Xu, Rosen, Ruifeng Wang, Rushil Gupta, Gobriel,
Sameh, Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Medvedkin, Vladimir,
Xiaoyun Wang, Wang, Yipeng1, Yisen Zhuang, Ziyang Xuan
On Mon, May 13, 2024 at 9:05 PM Morten Brørup <mb@smartsharesystems.com> wrote:
>
> Tyler, it looks like the line continuations in /drivers/baseband/acc/rte_vrb_pmd.c are indented with four spaces instead of double <TAB>.
In Tyler's defence, indentation is a nice mess in a lot of DPDK code
(for legacy, superficial reviews, not enough coffee etc... reasons).
mlx5 code is also a big pain as it seems to have its own coding style.
I was in the process of taking this series yesterday and I was
looking at those indents before getting EINTR.
Tyler, can you please fix those? This will save me some time and I can
get this merged soon.
Thanks.
--
David Marchand
^ permalink raw reply [flat|nested] 300+ messages in thread
* Re: [PATCH v5 31/45] baseband/acc: use rte stdatomic API
2024-05-14 7:31 ` David Marchand
@ 2024-05-14 16:03 ` Tyler Retzlaff
0 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:03 UTC (permalink / raw)
To: David Marchand
Cc: Morten Brørup, Chautru, Nicolas, dev, Mattias Rönnblom,
Sevincer, Abdullah, Ajit Khaparde, Alok Prasad, Burakov, Anatoly,
Andrew Rybchenko, Anoob Joseph, Richardson, Bruce, Marohn, Byron,
Chenbo Xia, Chengwen Feng, Loftus, Ciara, Power, Ciara,
Dariusz Sosnowski, Hunt, David, Devendra Singh Rawat, Carrillo,
Erik G, Guoyang Zhou, Harman Kalra, Van Haaren, Harry,
Nagarahalli, Honnappa, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Wu, Jingjing,
Joshua Washington, Joyce Kong, Guo, Junfeng, Laatz, Kevin,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Ori Kam, Pavan Nikhilesh, Mccarthy,
Peter, Rahul Lakkireddy, Pattan, Reshma, Xu, Rosen, Ruifeng Wang,
Rushil Gupta, Gobriel, Sameh, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Medvedkin, Vladimir, Xiaoyun Wang, Wang,
Yipeng1, Yisen Zhuang, Ziyang Xuan
On Tue, May 14, 2024 at 09:31:18AM +0200, David Marchand wrote:
> On Mon, May 13, 2024 at 9:05 PM Morten Brørup <mb@smartsharesystems.com> wrote:
> >
> > Tyler, it looks like the line continuations in /drivers/baseband/acc/rte_vrb_pmd.c are indented with four spaces instead of double <TAB>.
>
> In Tyler's defence, indentation is a nice mess in a lot of DPDK code
> (for legacy, superficial reviews, not enough coffee etc... reasons).
> mlx5 code is also a big pain as it seems to have its own coding style.
>
> I was in the process of taking this series yesterday and I was
> looking at those indents before getting EINTR.
> Tyler, can you please fix those? This will save me some time and I can
> get this merged soon.
Sure David, I'll tweak this patch.
>
>
> Thanks.
>
> --
> David Marchand
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 32/45] net/txgbe: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (30 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 31/45] baseband/acc: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 33/45] net/null: " Tyler Retzlaff
` (13 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/txgbe/txgbe_ethdev.c | 12 +++++++-----
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
3 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b75e889..a58f197 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -595,7 +595,7 @@ static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -2834,7 +2834,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev);
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -2859,7 +2859,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
rte_thread_detach(rte_thread_self());
txgbe_dev_setup_link_alarm_handler(dev);
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -2908,7 +2908,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
} else if (hw->phy.media_type == txgbe_media_type_fiber &&
dev->data->dev_conf.intr_conf.lsc != 0) {
txgbe_dev_wait_setup_link_complete(dev, 0);
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the TXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -2918,7 +2919,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
"txgbe-link",
txgbe_dev_setup_link_thread_handler, dev) < 0) {
PMD_DRV_LOG(ERR, "Create link thread failed!");
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 7e8067c..e8f55f7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -372,7 +372,7 @@ struct txgbe_adapter {
/* For RSS reta table update */
uint8_t rss_reta_updated;
- uint32_t link_thread_running;
+ RTE_ATOMIC(uint32_t) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index f1341fb..1abc190 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -206,7 +206,7 @@ static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->device_id = pci_dev->id.device_id;
--
1.8.3.1
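The __atomic_test_and_set/__atomic_clear replacements in the diff above implement a single-owner "link thread running" flag. A minimal standalone sketch of that pattern with the rte stdatomic API (the names here are illustrative, not the adapter structure's fields):

#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) link_thread_running;

/* Returns true if the caller became the single owner of the link thread. */
static bool
claim_link_thread(void)
{
	/* was: !__atomic_test_and_set(&link_thread_running, __ATOMIC_SEQ_CST) */
	return rte_atomic_exchange_explicit(&link_thread_running, 1,
			rte_memory_order_seq_cst) == 0;
}

static void
release_link_thread(void)
{
	/* was: __atomic_clear(&link_thread_running, __ATOMIC_SEQ_CST) */
	rte_atomic_store_explicit(&link_thread_running, 0,
			rte_memory_order_seq_cst);
}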
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 33/45] net/null: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (31 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 32/45] net/txgbe: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 34/45] event/dlb2: " Tyler Retzlaff
` (12 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/null/rte_eth_null.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 7c46004..f4ed3b8 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -37,8 +37,8 @@ struct null_queue {
struct rte_mempool *mb_pool;
struct rte_mbuf *dummy_packet;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_options {
@@ -102,7 +102,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -130,7 +130,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -155,7 +155,7 @@ struct pmd_internals {
rte_pktmbuf_free(bufs[i]);
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -178,7 +178,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
--
1.8.3.1
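As a condensed sketch of the counter pattern the diff above converts (struct and function names simplified for illustration, not the PMD's actual layout):

#include <stdint.h>

#include <rte_stdatomic.h>

struct null_queue_stats {
	RTE_ATOMIC(uint64_t) rx_pkts;
	RTE_ATOMIC(uint64_t) tx_pkts;
};

static inline void
count_rx(struct null_queue_stats *s, uint16_t nb)
{
	/* was: __atomic_fetch_add(&s->rx_pkts, nb, __ATOMIC_SEQ_CST) */
	rte_atomic_fetch_add_explicit(&s->rx_pkts, nb, rte_memory_order_seq_cst);
}

static inline uint64_t
read_rx(struct null_queue_stats *s)
{
	return rte_atomic_load_explicit(&s->rx_pkts, rte_memory_order_seq_cst);
}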
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 34/45] event/dlb2: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (32 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 33/45] net/null: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 35/45] dma/idxd: " Tyler Retzlaff
` (11 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/dlb2/dlb2.c | 34 +++++++++++++++++-----------------
drivers/event/dlb2/dlb2_priv.h | 13 +++++--------
drivers/event/dlb2/dlb2_xstats.c | 2 +-
3 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 628ddef..0b91f03 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1005,7 +1005,7 @@ struct process_local_port_data
}
dlb2->new_event_limit = config->nb_events_limit;
- __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&dlb2->inflights, 0, rte_memory_order_seq_cst);
/* Save number of ports/queues for this event dev */
dlb2->num_ports = config->nb_event_ports;
@@ -2668,10 +2668,10 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
batch_size = credits;
if (likely(credits &&
- __atomic_compare_exchange_n(
+ rte_atomic_compare_exchange_strong_explicit(
qm_port->credit_pool[type],
- &credits, credits - batch_size, false,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+ &credits, credits - batch_size,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst)))
return batch_size;
else
return 0;
@@ -2687,7 +2687,7 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
/* Replenish credits, saving one quanta for enqueues */
uint16_t val = ev_port->inflight_credits - quanta;
- __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
ev_port->inflight_credits -= val;
}
}
@@ -2696,8 +2696,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_port *ev_port)
{
- uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
- __ATOMIC_SEQ_CST);
+ uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
+ rte_memory_order_seq_cst);
const int num = 1;
if (unlikely(ev_port->inflight_max < sw_inflights)) {
@@ -2719,8 +2719,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
return 1;
}
- __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
+ rte_memory_order_seq_cst);
ev_port->inflight_credits += (credit_update_quanta);
if (ev_port->inflight_credits < num) {
@@ -3234,17 +3234,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_ldb_credits += num;
if (qm_port->cached_ldb_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_LDB_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_ldb_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
@@ -3252,17 +3252,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_dir_credits += num;
if (qm_port->cached_dir_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_DIR_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_dir_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index 49f1c66..2470ae0 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -348,7 +348,7 @@ struct dlb2_port {
uint32_t dequeue_depth;
enum dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
- uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) *credit_pool[DLB2_NUM_QUEUE_TYPES];
union {
struct {
uint16_t cached_ldb_credits;
@@ -586,7 +586,7 @@ struct dlb2_eventdev {
uint32_t xstats_count_mode_dev;
uint32_t xstats_count_mode_port;
uint32_t xstats_count;
- uint32_t inflights; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) inflights;
uint32_t new_event_limit;
int max_num_events_override;
int num_dir_credits_override;
@@ -623,15 +623,12 @@ struct dlb2_eventdev {
struct {
uint16_t max_ldb_credits;
uint16_t max_dir_credits;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t ldb_credit_pool;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t dir_credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) ldb_credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) dir_credit_pool;
};
struct {
uint16_t max_credits;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) credit_pool;
};
};
uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index ff15271..22094f3 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -173,7 +173,7 @@ struct dlb2_xstats_entry {
case nb_events_limit:
return dlb2->new_event_limit;
case inflight_events:
- return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
+ return rte_atomic_load_explicit(&dlb2->inflights, rte_memory_order_seq_cst);
case ldb_pool_size:
return dlb2->num_ldb_credits;
case dir_pool_size:
--
1.8.3.1
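The credit-pool update above is essentially a compare-and-swap over a shared counter. A minimal standalone sketch of that pattern with the rte stdatomic API (names are illustrative, and the retry policy is left to the caller as in the driver):

#include <stdint.h>

#include <rte_common.h>
#include <rte_stdatomic.h>

/* Take up to 'want' credits from a shared pool; returns the credits taken. */
static uint32_t
take_credits(RTE_ATOMIC(uint32_t) *pool, uint32_t want)
{
	uint32_t avail = rte_atomic_load_explicit(pool, rte_memory_order_seq_cst);
	uint32_t batch = RTE_MIN(avail, want);

	if (batch == 0)
		return 0;
	/* was: __atomic_compare_exchange_n(pool, &avail, avail - batch, ...) */
	if (rte_atomic_compare_exchange_strong_explicit(pool, &avail,
			avail - batch, rte_memory_order_seq_cst,
			rte_memory_order_seq_cst))
		return batch;
	return 0; /* lost the race; caller may retry or fall back */
}

The strong CAS variant is shown to match the diff; a weak CAS inside a retry loop would be an equally valid way to express the same operation.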
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 35/45] dma/idxd: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (33 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 34/45] event/dlb2: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 36/45] crypto/ccp: " Tyler Retzlaff
` (10 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index cd41777..537cf9b 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -33,7 +33,7 @@ struct idxd_pci_common {
rte_spinlock_t lk;
uint8_t wq_cfg_sz;
- uint16_t ref_count;
+ RTE_ATOMIC(uint16_t) ref_count;
volatile struct rte_idxd_bar0 *regs;
volatile uint32_t *wq_regs_base;
volatile struct rte_idxd_grpcfg *grp_regs;
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index a78889a..06fa115 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -136,7 +136,8 @@
* the PCI struct
*/
/* NOTE: review for potential ordering optimization */
- is_last_wq = (__atomic_fetch_sub(&idxd->u.pci->ref_count, 1, __ATOMIC_SEQ_CST) == 1);
+ is_last_wq = (rte_atomic_fetch_sub_explicit(&idxd->u.pci->ref_count, 1,
+ rte_memory_order_seq_cst) == 1);
if (is_last_wq) {
/* disable the device */
err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -330,9 +331,9 @@
return ret;
}
qid = rte_dma_get_dev_id_by_name(qname);
- max_qid = __atomic_load_n(
+ max_qid = rte_atomic_load_explicit(
&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
- __ATOMIC_SEQ_CST);
+ rte_memory_order_seq_cst);
/* we have queue 0 done, now configure the rest of the queues */
for (qid = 1; qid < max_qid; qid++) {
@@ -389,7 +390,7 @@
free(idxd.u.pci);
return ret;
}
- __atomic_fetch_add(&idxd.u.pci->ref_count, 1, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&idxd.u.pci->ref_count, 1, rte_memory_order_seq_cst);
}
return 0;
--
1.8.3.1
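A reduced sketch of the reference-count handling converted above, trimmed to the relevant field (struct and function names are invented for illustration):

#include <stdbool.h>
#include <stdint.h>

#include <rte_stdatomic.h>

struct pci_shared {
	RTE_ATOMIC(uint16_t) ref_count;
};

static void
shared_get(struct pci_shared *s)
{
	rte_atomic_fetch_add_explicit(&s->ref_count, 1, rte_memory_order_seq_cst);
}

/* Returns true when the caller dropped the last reference. */
static bool
shared_put(struct pci_shared *s)
{
	/* was: __atomic_fetch_sub(&s->ref_count, 1, __ATOMIC_SEQ_CST) == 1 */
	return rte_atomic_fetch_sub_explicit(&s->ref_count, 1,
			rte_memory_order_seq_cst) == 1;
}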
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 36/45] crypto/ccp: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (34 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 35/45] dma/idxd: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 37/45] common/cpt: " Tyler Retzlaff
` (9 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/crypto/ccp/ccp_dev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index b7ca3af..41c1422 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -116,15 +116,15 @@ struct ccp_queue *
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ (1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_and_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ ~(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline uint32_t
--
1.8.3.1
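The two helpers above set and clear a bit in a shared bitmap word. A self-contained sketch of the same idiom follows; WORD_OFFSET and BIT_OFFSET are reconstructed here for the example and may not match the driver's definitions:

#include <limits.h>

#include <rte_stdatomic.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define WORD_OFFSET(n)	((n) / BITS_PER_LONG)	/* assumed definition */
#define BIT_OFFSET(n)	((n) % BITS_PER_LONG)	/* assumed definition */

static inline void
bitmap_set_bit(unsigned long *bitmap, int n)
{
	rte_atomic_fetch_or_explicit(
		(unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
		1UL << BIT_OFFSET(n), rte_memory_order_seq_cst);
}

static inline void
bitmap_clear_bit(unsigned long *bitmap, int n)
{
	rte_atomic_fetch_and_explicit(
		(unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
		~(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}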
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 37/45] common/cpt: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (35 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 36/45] crypto/ccp: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 38/45] bus/vmbus: " Tyler Retzlaff
` (8 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/cpt/cpt_common.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index 6596cc0..dee430f 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -73,7 +73,7 @@ struct __rte_aligned(8) cpt_request_info {
const unsigned int qsize)
{
/* Ensure ordering between setting the entry and updating the tail */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
q->tail = (q->tail + cnt) & (qsize - 1);
}
--
1.8.3.1
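The one-line change above depends on a release fence ordering the queue-entry writes before the tail update. A small producer-side sketch of that idiom (ring layout simplified and names invented, not the cpt structures):

#include <stdint.h>

#include <rte_stdatomic.h>

#define RING_SIZE 64 /* illustrative; must be a power of two */

struct tiny_ring {
	void *entries[RING_SIZE];
	RTE_ATOMIC(uint32_t) tail; /* consumer polls this index */
};

/* Single-producer publish: write the entry, then advance the tail. */
static void
ring_publish(struct tiny_ring *q, void *e)
{
	uint32_t t = rte_atomic_load_explicit(&q->tail,
			rte_memory_order_relaxed);

	q->entries[t & (RING_SIZE - 1)] = e;
	/* Order the entry write before the tail update (was __ATOMIC_RELEASE). */
	rte_atomic_thread_fence(rte_memory_order_release);
	rte_atomic_store_explicit(&q->tail, t + 1, rte_memory_order_relaxed);
}

A release store on the tail would be an alternative to the standalone fence; the patch keeps the fence, which leaves the driver's tail field a plain (non-atomic) integer.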
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 38/45] bus/vmbus: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (36 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 37/45] common/cpt: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 39/45] examples: " Tyler Retzlaff
` (7 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
drivers/bus/vmbus/vmbus_channel.c | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h
index a17ce40..e3299aa 100644
--- a/drivers/bus/vmbus/rte_vmbus_reg.h
+++ b/drivers/bus/vmbus/rte_vmbus_reg.h
@@ -28,7 +28,7 @@ struct vmbus_message {
*/
struct vmbus_mon_trig {
- uint32_t pending;
+ RTE_ATOMIC(uint32_t) pending;
uint32_t armed;
} __rte_packed;
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index 4d74df3..925c2aa 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -19,16 +19,16 @@
#include "private.h"
static inline void
-vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+vmbus_sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
- /* Use GCC builtin which atomic does atomic OR operation */
- __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}
static inline void
vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
{
- uint32_t *monitor_addr, monitor_mask;
+ RTE_ATOMIC(uint32_t) *monitor_addr;
+ uint32_t monitor_mask;
unsigned int trigger_index;
trigger_index = monitor_id / HV_MON_TRIG_LEN;
--
1.8.3.1
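A trimmed sketch of the pattern converted above, i.e. atomically setting a bit in a shared trigger word whose type now carries RTE_ATOMIC() (field and function names simplified for illustration):

#include <stdint.h>

#include <rte_stdatomic.h>

struct mon_trig {
	RTE_ATOMIC(uint32_t) pending;
};

static inline void
sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
	/* was: __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST) */
	rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}

static void
signal_monitor(struct mon_trig *trig, uint32_t monitor_id)
{
	sync_set_bit(&trig->pending, UINT32_C(1) << (monitor_id & 31));
}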
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 39/45] examples: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (37 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 38/45] bus/vmbus: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 40/45] app/dumpcap: " Tyler Retzlaff
` (6 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
examples/bbdev_app/main.c | 13 +++++----
examples/l2fwd-event/l2fwd_common.h | 4 +--
examples/l2fwd-event/l2fwd_event.c | 24 ++++++++--------
examples/l2fwd-jobstats/main.c | 11 ++++----
.../client_server_mp/mp_server/main.c | 6 ++--
examples/server_node_efd/efd_server/main.c | 6 ++--
examples/vhost/main.c | 32 +++++++++++-----------
examples/vhost/main.h | 4 +--
examples/vhost/virtio_net.c | 13 +++++----
examples/vhost_blk/vhost_blk.c | 8 +++---
examples/vm_power_manager/channel_manager.h | 4 ++-
examples/vm_power_manager/channel_monitor.c | 9 +++---
examples/vm_power_manager/vm_power_cli.c | 3 +-
13 files changed, 73 insertions(+), 64 deletions(-)
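Several of the examples touched below share the same exit-flag idiom: the signal handler does a relaxed store, and the worker loops poll with a relaxed load. A condensed, self-contained sketch of that idiom (the loop body here is invented for illustration):

#include <stdint.h>

#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) global_exit_flag;

static void
signal_handler(int signum)
{
	(void)signum;
	/* was: __atomic_store_n(&global_exit_flag, 1, __ATOMIC_RELAXED) */
	rte_atomic_store_explicit(&global_exit_flag, 1, rte_memory_order_relaxed);
}

static void
worker_loop(void)
{
	while (!rte_atomic_load_explicit(&global_exit_flag,
			rte_memory_order_relaxed)) {
		/* one unit of forwarding or processing work */
	}
}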
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index d4c686c..7124b49 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -165,7 +165,7 @@ struct stats_lcore_params {
.num_dec_cores = 1,
};
-static uint16_t global_exit_flag;
+static RTE_ATOMIC(uint16_t) global_exit_flag;
/* display usage */
static inline void
@@ -277,7 +277,7 @@ uint16_t bbdev_parse_number(const char *mask)
signal_handler(int signum)
{
printf("\nSignal %d received\n", signum);
- __atomic_store_n(&global_exit_flag, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 1, rte_memory_order_relaxed);
}
static void
@@ -321,7 +321,8 @@ uint16_t bbdev_parse_number(const char *mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME &&
- !__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED); count++) {
+ !rte_atomic_load_explicit(&global_exit_flag,
+ rte_memory_order_relaxed); count++) {
memset(&link, 0, sizeof(link));
link_get_err = rte_eth_link_get_nowait(port_id, &link);
@@ -675,7 +676,7 @@ uint16_t bbdev_parse_number(const char *mask)
{
struct stats_lcore_params *stats_lcore = arg;
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
print_stats(stats_lcore);
rte_delay_ms(500);
}
@@ -921,7 +922,7 @@ uint16_t bbdev_parse_number(const char *mask)
const bool run_decoder = (lcore_conf->core_type &
(1 << RTE_BBDEV_OP_TURBO_DEC));
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
if (run_encoder)
run_encoding(lcore_conf);
if (run_decoder)
@@ -1055,7 +1056,7 @@ uint16_t bbdev_parse_number(const char *mask)
.align = alignof(struct rte_mbuf *),
};
- __atomic_store_n(&global_exit_flag, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 0, rte_memory_order_relaxed);
sigret = signal(SIGTERM, signal_handler);
if (sigret == SIG_ERR)
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index c56b3e7..8cf91b9 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -61,8 +61,8 @@
/* Per-port statistics struct */
struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t dropped;
- uint64_t tx;
- uint64_t rx;
+ RTE_ATOMIC(uint64_t) tx;
+ RTE_ATOMIC(uint64_t) rx;
};
/* Event vector attributes */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 4b5a032..2247202 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -163,8 +163,8 @@
dst_port = rsrc->dst_ports[mbuf->port];
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].rx,
+ 1, rte_memory_order_relaxed);
mbuf->port = dst_port;
if (flags & L2FWD_EVENT_UPDT_MAC)
@@ -179,8 +179,8 @@
rte_event_eth_tx_adapter_txq_set(mbuf, 0);
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].tx,
+ 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -367,8 +367,8 @@
vec->queue = 0;
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbufs[0]->port].rx,
+ vec->nb_elem, rte_memory_order_relaxed);
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (j < vec->nb_elem)
@@ -382,14 +382,14 @@
}
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[vec->port].tx,
+ vec->nb_elem, rte_memory_order_relaxed);
} else {
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].rx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (j < vec->nb_elem)
rte_prefetch0(
@@ -406,9 +406,9 @@
rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].tx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
}
}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index cb7582a..308b8ed 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -80,7 +80,7 @@ struct __rte_cache_aligned lcore_queue_conf {
struct rte_jobstats idle_job;
struct rte_jobstats_context jobs_context;
- uint16_t stats_read_pending;
+ RTE_ATOMIC(uint16_t) stats_read_pending;
rte_spinlock_t lock;
};
/* >8 End of list of queues to be polled for given lcore. */
@@ -151,9 +151,9 @@ struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t collection_time = rte_get_timer_cycles();
/* Ask forwarding thread to give us stats. */
- __atomic_store_n(&qconf->stats_read_pending, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&qconf->lock);
- __atomic_store_n(&qconf->stats_read_pending, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);
/* Collect context statistics. */
stats_period = ctx->state_time - ctx->start_time;
@@ -522,8 +522,9 @@ struct __rte_cache_aligned l2fwd_port_statistics {
repeats++;
need_manage = qconf->flush_timer.expire < now;
/* Check if we was esked to give a stats. */
- stats_read_pending = __atomic_load_n(&qconf->stats_read_pending,
- __ATOMIC_RELAXED);
+ stats_read_pending = rte_atomic_load_explicit(
+ &qconf->stats_read_pending,
+ rte_memory_order_relaxed);
need_manage |= stats_read_pending;
for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index f54bb8b..ebfc2fe 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -157,12 +157,12 @@ struct client_rx_buf {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
uint32_t status = 0;
/* Only one core should display stats */
- if (__atomic_compare_exchange_n(&display_stats, &status, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &status, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/server_node_efd/efd_server/main.c b/examples/server_node_efd/efd_server/main.c
index fd72882..75ff0ea 100644
--- a/examples/server_node_efd/efd_server/main.c
+++ b/examples/server_node_efd/efd_server/main.c
@@ -177,12 +177,12 @@ struct efd_stats {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
/* Only one core should display stats */
uint32_t display_init = 0;
- if (__atomic_compare_exchange_n(&display_stats, &display_init, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &display_init, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned int sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3fc1b15..4391d88 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1052,10 +1052,10 @@ static unsigned check_ports_num(unsigned nb_ports)
}
if (enable_stats) {
- __atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_total_atomic, 1,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
src_vdev->stats.tx_total++;
src_vdev->stats.tx += ret;
}
@@ -1072,10 +1072,10 @@ static unsigned check_ports_num(unsigned nb_ports)
ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, nr_xmit,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1404,10 +1404,10 @@ static void virtio_tx_offload(struct rte_mbuf *m)
}
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, rx_count,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, enqueue_count,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1832,10 +1832,10 @@ uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
- __ATOMIC_SEQ_CST);
- rx = __atomic_load_n(&vdev->stats.rx_atomic,
- __ATOMIC_SEQ_CST);
+ rx_total = rte_atomic_load_explicit(&vdev->stats.rx_total_atomic,
+ rte_memory_order_seq_cst);
+ rx = rte_atomic_load_explicit(&vdev->stats.rx_atomic,
+ rte_memory_order_seq_cst);
rx_dropped = rx_total - rx;
printf("Statistics for device %d\n"
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index c1c9a42..c986cbc 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -22,8 +22,8 @@
struct device_statistics {
uint64_t tx;
uint64_t tx_total;
- uint64_t rx_atomic;
- uint64_t rx_total_atomic;
+ RTE_ATOMIC(uint64_t) rx_atomic;
+ RTE_ATOMIC(uint64_t) rx_total_atomic;
};
struct vhost_queue {
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 514c8e0..55af6e7 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -198,7 +198,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
+ avail_idx = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire);
start_idx = queue->last_used_idx;
free_entries = avail_idx - start_idx;
count = RTE_MIN(count, free_entries);
@@ -231,7 +232,8 @@
rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
}
- __atomic_fetch_add(&vr->used->idx, count, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, count,
+ rte_memory_order_release);
queue->last_used_idx += count;
rte_vhost_vring_call(dev->vid, queue_id);
@@ -386,8 +388,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
- queue->last_avail_idx;
+ free_entries = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire) - queue->last_avail_idx;
if (free_entries == 0)
return 0;
@@ -442,7 +444,8 @@
queue->last_avail_idx += i;
queue->last_used_idx += i;
- __atomic_fetch_add(&vr->used->idx, i, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, i,
+ rte_memory_order_acq_rel);
rte_vhost_vring_call(dev->vid, queue_id);
diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
index 376f7b8..03f1ac9 100644
--- a/examples/vhost_blk/vhost_blk.c
+++ b/examples/vhost_blk/vhost_blk.c
@@ -85,9 +85,9 @@ struct vhost_blk_ctrlr *
*/
used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
used->idx++;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
vq->id, used->idx, task->req_idx);
@@ -111,12 +111,12 @@ struct vhost_blk_ctrlr *
desc->id = task->buffer_id;
desc->addr = 0;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
if (vq->used_wrap_counter)
desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
else
desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
task->inflight_idx);
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index 7038e9d..eb989b2 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -13,6 +13,8 @@
#include <linux/un.h>
#include <stdbool.h>
+#include <rte_stdatomic.h>
+
/* Maximum name length including '\0' terminator */
#define CHANNEL_MGR_MAX_NAME_LEN 64
@@ -58,7 +60,7 @@ enum channel_type {
*/
struct channel_info {
char channel_path[UNIX_PATH_MAX]; /**< Path to host socket */
- volatile uint32_t status; /**< Connection status(enum channel_status) */
+ volatile RTE_ATOMIC(uint32_t) status; /**< Connection status(enum channel_status) */
int fd; /**< AF_UNIX socket fd */
unsigned channel_num; /**< CHANNEL_MGR_SOCKET_PATH/<vm_name>.channel_num */
enum channel_type type; /**< Binary, ini, json, etc. */
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5fef268..d384c86 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -828,8 +828,9 @@ void channel_monitor_exit(void)
return -1;
uint32_t channel_connected = CHANNEL_MGR_CHANNEL_CONNECTED;
- if (__atomic_compare_exchange_n(&(chan_info->status), &channel_connected,
- CHANNEL_MGR_CHANNEL_PROCESSING, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_connected,
+ CHANNEL_MGR_CHANNEL_PROCESSING, rte_memory_order_relaxed,
+ rte_memory_order_relaxed) == 0)
return -1;
if (pkt->command == RTE_POWER_CPU_POWER) {
@@ -934,8 +935,8 @@ void channel_monitor_exit(void)
* from management thread
*/
uint32_t channel_processing = CHANNEL_MGR_CHANNEL_PROCESSING;
- __atomic_compare_exchange_n(&(chan_info->status), &channel_processing,
- CHANNEL_MGR_CHANNEL_CONNECTED, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_processing,
+ CHANNEL_MGR_CHANNEL_CONNECTED, rte_memory_order_relaxed, rte_memory_order_relaxed);
return 0;
}
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index 1a55e55..c078325 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -74,7 +74,8 @@ struct cmd_show_vm_result {
for (i = 0; i < info.num_channels; i++) {
cmdline_printf(cl, " [%u]: %s, status = ", i,
info.channels[i].channel_path);
- switch (info.channels[i].status) {
+ switch (rte_atomic_load_explicit(&info.channels[i].status,
+ rte_memory_order_relaxed)) {
case CHANNEL_MGR_CHANNEL_CONNECTED:
cmdline_printf(cl, "CONNECTED\n");
break;
--
1.8.3.1
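The examples/vhost/virtio_net.c hunks above show a detail worth noting: the vring index
fields keep their plain uint16_t type (their layout follows the virtio spec and the shared
vhost headers), so the conversion casts the address to an __rte_atomic-qualified pointer at
each access instead of changing the declaration. A minimal standalone sketch of that cast
pattern, using an illustrative ring_hdr structure rather than the real vring types:

#include <stdint.h>
#include <rte_stdatomic.h>

/* Illustrative header whose field types cannot be changed (ABI/spec-fixed). */
struct ring_hdr {
	uint16_t idx;
};

/* Read the index with acquire ordering without retyping the field. */
static inline uint16_t
ring_idx_load_acquire(struct ring_hdr *hdr)
{
	return rte_atomic_load_explicit((uint16_t __rte_atomic *)&hdr->idx,
		rte_memory_order_acquire);
}

/* Publish an updated index with release ordering. */
static inline void
ring_idx_add_release(struct ring_hdr *hdr, uint16_t count)
{
	rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&hdr->idx, count,
		rte_memory_order_release);
}

Both __rte_atomic and the rte_memory_order_* constants come from <rte_stdatomic.h>; when
RTE_ENABLE_STDATOMIC is set they map to C11 _Atomic and memory_order values, otherwise to
the gcc __atomic builtins.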
* [PATCH v5 40/45] app/dumpcap: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (38 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 39/45] examples: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 41/45] app/test: " Tyler Retzlaff
` (5 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/dumpcap/main.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
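The dumpcap conversion below is the simple relaxed-flag pattern: a signal handler stores to
an RTE_ATOMIC(bool) and the worker loops poll it. A minimal standalone sketch of the same
pattern, with illustrative names (quit, on_signal, main_loop) rather than the application's:

#include <signal.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(bool) quit;	/* set asynchronously, polled by the loop */

static void
on_signal(int sig __rte_unused)
{
	/* Relaxed is sufficient: the flag does not order any other data. */
	rte_atomic_store_explicit(&quit, true, rte_memory_order_relaxed);
}

static void
main_loop(void)
{
	signal(SIGINT, on_signal);
	while (!rte_atomic_load_explicit(&quit, rte_memory_order_relaxed)) {
		/* ... drain the capture ring, write packets ... */
	}
}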
diff --git a/app/dumpcap/main.c b/app/dumpcap/main.c
index cc0f66b..b25b95e 100644
--- a/app/dumpcap/main.c
+++ b/app/dumpcap/main.c
@@ -51,7 +51,7 @@
/* command line flags */
static const char *progname;
-static bool quit_signal;
+static RTE_ATOMIC(bool) quit_signal;
static bool group_read;
static bool quiet;
static bool use_pcapng = true;
@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv)
static void
signal_handler(int sig_num __rte_unused)
{
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
@@ -490,7 +490,7 @@ static void statistics_loop(void)
printf("%-15s %10s %10s\n",
"Interface", "Received", "Dropped");
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
RTE_ETH_FOREACH_DEV(p) {
if (rte_eth_dev_get_name_by_port(p, name) < 0)
continue;
@@ -528,7 +528,7 @@ static void statistics_loop(void)
static void
monitor_primary(void *arg __rte_unused)
{
- if (__atomic_load_n(&quit_signal, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed))
return;
if (rte_eal_primary_proc_alive(NULL)) {
@@ -536,7 +536,7 @@ static void statistics_loop(void)
} else {
fprintf(stderr,
"Primary process is no longer active, exiting...\n");
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
}
@@ -983,7 +983,7 @@ int main(int argc, char **argv)
show_count(0);
}
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
if (process_ring(out, r) < 0) {
fprintf(stderr, "pcapng file write failed; %s\n",
strerror(errno));
--
1.8.3.1
* [PATCH v5 41/45] app/test: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (39 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 40/45] app/dumpcap: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 42/45] app/test-eventdev: " Tyler Retzlaff
` (4 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test/test_bpf.c | 46 ++++++++-----
app/test/test_distributor.c | 114 ++++++++++++++++-----------------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++++----
app/test/test_hash_multiwriter.c | 16 ++---
app/test/test_hash_readwrite.c | 74 ++++++++++-----------
app/test/test_hash_readwrite_lf_perf.c | 88 ++++++++++++-------------
app/test/test_lcores.c | 25 ++++----
app/test/test_lpm_perf.c | 14 ++--
app/test/test_mcslock.c | 12 ++--
app/test/test_mempool_perf.c | 9 +--
app/test/test_pflock.c | 13 ++--
app/test/test_pmd_perf.c | 10 +--
app/test/test_rcu_qsbr_perf.c | 114 +++++++++++++++++----------------
app/test/test_ring_perf.c | 11 ++--
app/test/test_ring_stress_impl.h | 10 +--
app/test/test_rwlock.c | 9 +--
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +++----
app/test/test_spinlock.c | 9 +--
app/test/test_stack_perf.c | 12 ++--
app/test/test_threads.c | 33 +++++-----
app/test/test_ticketlock.c | 9 +--
app/test/test_timer.c | 31 +++++----
24 files changed, 378 insertions(+), 343 deletions(-)
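Most of the test conversions in this patch follow one counter pattern: a shared counter
declared with RTE_ATOMIC(), bumped by workers with a relaxed fetch-add and summed by the
control thread with a relaxed load. A minimal standalone sketch with an illustrative
counter name:

#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint64_t) handled;	/* shared by all worker lcores */

/* Worker side: relaxed add, only the final total is of interest. */
static inline void
count_burst(unsigned int nb_pkts)
{
	rte_atomic_fetch_add_explicit(&handled, nb_pkts, rte_memory_order_relaxed);
}

/* Control side: relaxed read once the workers have been joined. */
static inline uint64_t
total_handled(void)
{
	return rte_atomic_load_explicit(&handled, rte_memory_order_relaxed);
}

The (uint32_t *)(uintptr_t) casts that appear with rte_wait_until_equal_32() in the hunks
below are there because that helper takes a non-atomic uint32_t pointer; the intermediate
uintptr_t cast strips the atomic qualifier from an RTE_ATOMIC(uint32_t) variable so it can
be passed without a qualifier warning.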
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 53e3a31..2e43442 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -39,8 +39,8 @@
*/
struct dummy_offset {
- uint64_t u64;
- uint32_t u32;
+ RTE_ATOMIC(uint64_t) u64;
+ RTE_ATOMIC(uint32_t) u32;
uint16_t u16;
uint8_t u8;
};
@@ -1581,32 +1581,46 @@ struct bpf_test {
memset(&dfe, 0, sizeof(dfe));
rv = 1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = -1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = (int32_t)TEST_FILL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_3;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}
diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index 13357b9..60fe96e 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -47,14 +47,14 @@ struct worker_params {
struct worker_params worker_params;
/* statics - all zero-initialized by default */
-static volatile int quit; /**< general quit variable for all threads */
-static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
-static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
-static volatile unsigned worker_idx;
-static volatile unsigned zero_idx;
+static volatile RTE_ATOMIC(int) quit; /**< general quit variable for all threads */
+static volatile RTE_ATOMIC(int) zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile RTE_ATOMIC(int) zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
+static volatile RTE_ATOMIC(unsigned int) zero_idx;
struct __rte_cache_aligned worker_stats {
- volatile unsigned handled_packets;
+ volatile RTE_ATOMIC(unsigned int) handled_packets;
};
struct worker_stats worker_stats[RTE_MAX_LCORE];
@@ -66,8 +66,8 @@ struct __rte_cache_aligned worker_stats {
{
unsigned i, count = 0;
for (i = 0; i < worker_idx; i++)
- count += __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED);
+ count += rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed);
return count;
}
@@ -77,8 +77,8 @@ struct __rte_cache_aligned worker_stats {
{
unsigned int i;
for (i = 0; i < RTE_MAX_LCORE; i++)
- __atomic_store_n(&worker_stats[i].handled_packets, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&worker_stats[i].handled_packets, 0,
+ rte_memory_order_relaxed);
}
/* this is the basic worker function for sanity test
@@ -91,17 +91,17 @@ struct __rte_cache_aligned worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -162,8 +162,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with all zero hashes done.\n");
/* pick two flows and check they go correctly */
@@ -189,9 +189,9 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(
+ rte_atomic_load_explicit(
&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_memory_order_relaxed));
printf("Sanity test with two hash values done\n");
}
@@ -218,8 +218,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with non-zero hashes done\n");
rte_mempool_put_bulk(p, (void *)bufs, BURST);
@@ -311,18 +311,18 @@ struct __rte_cache_aligned worker_stats {
struct rte_distributor *d = wp->dist;
unsigned int i;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
rte_pktmbuf_free(buf[i]);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(d, id, buf, num);
return 0;
}
@@ -381,51 +381,51 @@ struct __rte_cache_aligned worker_stats {
unsigned int num;
unsigned int zero_id = 0;
unsigned int zero_unset;
- const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
- __ATOMIC_RELAXED);
+ const unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
/* wait for quit single globally, or for worker zero, wait
* for zero_quit */
while (!quit && !(id == zero_id && zero_quit)) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
if (id == zero_id) {
rte_distributor_return_pkt(d, id, NULL, 0);
/* for worker zero, allow it to restart to pick up last packet
* when all workers are shutting down.
*/
- __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 1, rte_memory_order_release);
while (zero_quit)
usleep(100);
- __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 0, rte_memory_order_release);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets,
- num, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets,
+ num, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
}
@@ -491,17 +491,17 @@ struct __rte_cache_aligned worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST * 2) {
printf("Line %d: Error, not all packets flushed. "
@@ -560,18 +560,18 @@ struct __rte_cache_aligned worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
@@ -596,18 +596,18 @@ struct __rte_cache_aligned worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num, i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
*seq_field(buf[i]) += id + 1;
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -679,8 +679,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
/* Sort returned packets by sent order (sequence numbers). */
for (i = 0; i < buf_count; i++) {
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index c0ad39d..e678aec 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -31,7 +31,7 @@
/* static vars - zero initialized by default */
static volatile int quit;
-static volatile unsigned worker_idx;
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
struct __rte_cache_aligned worker_stats {
volatile unsigned handled_packets;
@@ -121,7 +121,7 @@ struct __rte_cache_aligned worker_stats {
struct rte_distributor *d = arg;
unsigned int num = 0;
int i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *buf[8];
for (i = 0; i < 8; i++)
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 9296de2..bae39af 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -53,12 +53,13 @@
#define MAX_LCORES (rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))
-static uint32_t obj_count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) obj_count;
+static RTE_ATOMIC(uint32_t) synchro;
#define WAIT_SYNCHRO_FOR_WORKERS() do { \
if (lcore_self != rte_get_main_lcore()) \
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, \
+ rte_memory_order_relaxed); \
} while(0)
/*
@@ -71,7 +72,8 @@
WAIT_SYNCHRO_FOR_WORKERS();
- __atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silent the check in the caller */
+ /* silent the check in the caller */
+ rte_atomic_store_explicit(&obj_count, 1, rte_memory_order_relaxed);
if (rte_eal_init(0, NULL) != -1)
return -1;
@@ -113,7 +115,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
if (rp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -178,7 +180,7 @@
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -244,7 +246,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_hash_create(&hash_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple times simultaneously */
@@ -311,7 +313,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_fbk_hash_create(&fbk_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -376,7 +378,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
if (lpm != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -437,8 +439,8 @@ struct test_case test_cases[] = {
if (pt_case->func == NULL)
return -1;
- __atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&obj_count, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -448,7 +450,7 @@ struct test_case test_cases[] = {
rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
}
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
@@ -463,7 +465,7 @@ struct test_case test_cases[] = {
pt_case->clean(lcore_id);
}
- count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
+ count = rte_atomic_load_explicit(&obj_count, rte_memory_order_relaxed);
if (count != 1) {
printf("%s: common object allocated %d times (should be 1)\n",
pt_case->name, count);
diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
index ed9dd41..33d3147 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/app/test/test_hash_multiwriter.c
@@ -43,8 +43,8 @@ struct {
const uint32_t nb_total_tsx_insertion = 4.5*1024*1024;
uint32_t rounded_nb_total_tsx_insertion;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
static int use_htm;
@@ -84,8 +84,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)
tbl_multiwriter_test_params.keys[i]
@@ -166,8 +166,8 @@ struct {
tbl_multiwriter_test_params.found = found;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
/* Get list of enabled cores */
i = 0;
@@ -233,8 +233,8 @@ struct {
printf("No key corrupted during multiwriter insertion.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED)/
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed)/
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf(" cycles per insertion: %llu\n", cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 4997a01..1867376 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -45,14 +45,14 @@ struct {
struct rte_hash *h;
} tbl_rw_test_param;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
-static uint64_t gread_cycles;
-static uint64_t gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
-static uint64_t greads;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrites;
static int
test_hash_readwrite_worker(__rte_unused void *arg)
@@ -110,8 +110,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_rw_test_param.num_insert; i++)
tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
@@ -209,8 +209,8 @@ struct {
int worker_cnt = rte_lcore_count() - 1;
uint32_t tot_insert = 0;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
if (init_params(use_ext, use_htm, use_rw_lf, use_jhash) != 0)
goto err;
@@ -269,8 +269,8 @@ struct {
printf("No key corrupted during read-write test.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
@@ -310,8 +310,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, i, rte_memory_order_relaxed);
return 0;
}
@@ -344,9 +344,9 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rw_test_param.num_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rw_test_param.num_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -369,11 +369,11 @@ struct {
uint64_t start = 0, end = 0;
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
if (init_params(0, use_htm, 0, use_jhash) != 0)
goto err;
@@ -430,10 +430,10 @@ struct {
if (tot_worker_lcore < core_cnt[n] * 2)
goto finish;
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -475,8 +475,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_only[n] = cycles_per_insertion;
printf("Reader only: cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -484,17 +484,17 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->write_only[n] = cycles_per_insertion;
printf("Writer only: cycles per writes: %llu\n",
cycles_per_insertion);
}
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -569,8 +569,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_write_r[n] = cycles_per_insertion;
printf("Read-write cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -578,8 +578,8 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->read_write_w[n] = cycles_per_insertion;
printf("Read-write cycles per writes: %llu\n",
cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite_lf_perf.c b/app/test/test_hash_readwrite_lf_perf.c
index 5d18850..4523985 100644
--- a/app/test/test_hash_readwrite_lf_perf.c
+++ b/app/test/test_hash_readwrite_lf_perf.c
@@ -86,10 +86,10 @@ struct rwc_perf {
struct rte_hash *h;
} tbl_rwc_test_param;
-static uint64_t gread_cycles;
-static uint64_t greads;
-static uint64_t gwrite_cycles;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gwrites;
static volatile uint8_t writer_done;
@@ -651,8 +651,8 @@ struct rwc_perf {
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, read_cnt*loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, read_cnt*loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -724,8 +724,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -742,8 +742,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_hit[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -791,8 +791,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -811,8 +811,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_miss[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -861,8 +861,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -884,8 +884,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_nsp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -935,8 +935,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -958,8 +958,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_sp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1007,8 +1007,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1030,8 +1030,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
}
@@ -1087,9 +1087,9 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n",
rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0,
+ rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1127,10 +1127,10 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles,
- __ATOMIC_RELAXED) /
- __atomic_load_n(&greads,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles,
+ rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads,
+ rte_memory_order_relaxed);
rwc_perf_results->multi_rw[m][k][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n",
@@ -1178,8 +1178,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
write_type = WRITE_NO_KEY_SHIFT;
@@ -1210,8 +1210,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_extbkt[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1280,9 +1280,9 @@ struct rwc_perf {
tbl_rwc_test_param.keys_no_ks + i);
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rwc_test_param.single_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -1328,8 +1328,8 @@ struct rwc_perf {
rwc_core_cnt[n];
printf("\nNumber of writers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
rte_rcu_qsbr_init(rv, RTE_MAX_LCORE);
@@ -1364,8 +1364,8 @@ struct rwc_perf {
rte_eal_mp_wait_lcore();
unsigned long long cycles_per_write_operation =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
rwc_perf_results->writer_add_del[n]
= cycles_per_write_operation;
printf("Cycles per write operation: %llu\n",
diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 3434a0d..bd5c0dd 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -10,6 +10,7 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_thread.h>
+#include <rte_stdatomic.h>
#include "test.h"
@@ -25,7 +26,7 @@ struct thread_context {
enum { Thread_INIT, Thread_ERROR, Thread_DONE } state;
bool lcore_id_any;
rte_thread_t id;
- unsigned int *registered_count;
+ RTE_ATOMIC(unsigned int) *registered_count;
};
static uint32_t thread_loop(void *arg)
@@ -49,10 +50,10 @@ static uint32_t thread_loop(void *arg)
t->state = Thread_ERROR;
}
/* Report register happened to the control thread. */
- __atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(t->registered_count, 1, rte_memory_order_release);
/* Wait for release from the control thread. */
- while (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(t->registered_count, rte_memory_order_acquire) != 0)
sched_yield();
rte_thread_unregister();
lcore_id = rte_lcore_id();
@@ -73,7 +74,7 @@ static uint32_t thread_loop(void *arg)
{
struct thread_context thread_contexts[RTE_MAX_LCORE];
unsigned int non_eal_threads_count;
- unsigned int registered_count;
+ RTE_ATOMIC(unsigned int) registered_count;
struct thread_context *t;
unsigned int i;
int ret;
@@ -93,7 +94,7 @@ static uint32_t thread_loop(void *arg)
}
printf("non-EAL threads count: %u\n", non_eal_threads_count);
/* Wait all non-EAL threads to register. */
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
@@ -109,14 +110,14 @@ static uint32_t thread_loop(void *arg)
if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
non_eal_threads_count++;
printf("non-EAL threads count: %u\n", non_eal_threads_count);
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
}
skip_lcore_any:
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -225,7 +226,7 @@ struct limit_lcore_context {
struct thread_context thread_contexts[2];
unsigned int non_eal_threads_count = 0;
struct limit_lcore_context l[2] = {};
- unsigned int registered_count = 0;
+ RTE_ATOMIC(unsigned int) registered_count = 0;
struct thread_context *t;
void *handle[2] = {};
unsigned int i;
@@ -275,7 +276,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 1 ||
@@ -298,7 +299,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 2 ||
@@ -315,7 +316,7 @@ struct limit_lcore_context {
}
rte_lcore_dump(stdout);
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -337,7 +338,7 @@ struct limit_lcore_context {
cleanup_threads:
/* Release all threads */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
rte_thread_join(t->id, NULL);
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
index 82daf9e..bc4bdde 100644
--- a/app/test/test_lpm_perf.c
+++ b/app/test/test_lpm_perf.c
@@ -22,8 +22,8 @@
struct rte_lpm *lpm;
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
-static volatile uint32_t thr_id;
-static uint64_t gwrite_cycles;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
static uint32_t num_writers;
/* LPM APIs are not thread safe, use spinlock */
@@ -362,7 +362,7 @@ static void generate_large_route_rule_table(void)
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -470,7 +470,7 @@ static void generate_large_route_rule_table(void)
total_cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, total_cycles, rte_memory_order_relaxed);
return 0;
@@ -540,9 +540,9 @@ static void generate_large_route_rule_table(void)
reader_f = test_lpm_reader;
writer_done = 0;
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
/* Launch reader threads */
for (i = j; i < num_cores; i++)
@@ -563,7 +563,7 @@ static void generate_large_route_rule_table(void)
printf("Total LPM Adds: %d\n", TOTAL_WRITES);
printf("Total LPM Deletes: %d\n", TOTAL_WRITES);
printf("Average LPM Add/Del: %"PRIu64" cycles\n",
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed)
/ TOTAL_WRITES);
writer_done = 1;
diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index 46ff13c..8fcbc11 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -42,7 +42,7 @@
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_mcslock_per_core(__rte_unused void *arg)
@@ -75,7 +75,7 @@
rte_mcslock_t ml_perf_me;
/* wait synchro */
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -100,14 +100,14 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lock = 1;
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
@@ -116,11 +116,11 @@
printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index a42a772..130d598 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -88,7 +88,7 @@
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
@@ -188,7 +188,8 @@ struct __rte_cache_aligned mempool_test_stats {
/* wait synchro for workers */
if (lcore_id != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
start_cycles = rte_get_timer_cycles();
@@ -233,7 +234,7 @@ struct __rte_cache_aligned mempool_test_stats {
int ret;
unsigned cores_save = cores;
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
/* reset stats */
memset(stats, 0, sizeof(stats));
@@ -258,7 +259,7 @@ struct __rte_cache_aligned mempool_test_stats {
}
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
ret = per_lcore_mempool_test(mp);
diff --git a/app/test/test_pflock.c b/app/test/test_pflock.c
index 5f77b15..d989a68 100644
--- a/app/test/test_pflock.c
+++ b/app/test/test_pflock.c
@@ -31,7 +31,7 @@
static rte_pflock_t sl;
static rte_pflock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_pflock_per_core(__rte_unused void *arg)
@@ -69,7 +69,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -99,7 +100,7 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -107,7 +108,7 @@
printf("\nTest with phase-fair lock on single core...\n");
lock = 1;
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -116,12 +117,12 @@
printf("\nPhase-fair test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 35fa068..995b0a6 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -537,7 +537,7 @@ enum {
return 0;
}
-static uint64_t start;
+static RTE_ATOMIC(uint64_t) start;
static inline int
poll_burst(void *args)
@@ -575,7 +575,7 @@ enum {
num[portid] = pkt_per_port;
}
- rte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_64((uint64_t *)(uintptr_t)&start, 1, rte_memory_order_acquire);
cur_tsc = rte_rdtsc();
while (total) {
@@ -629,9 +629,9 @@ enum {
/* only when polling first */
if (flags == SC_BURST_POLL_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_relaxed);
else
- __atomic_store_n(&start, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 0, rte_memory_order_relaxed);
/* start polling thread
* if in POLL_FIRST mode, poll once launched;
@@ -655,7 +655,7 @@ enum {
/* only when polling second */
if (flags == SC_BURST_XMIT_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_release);
/* wait for polling finished */
diff_tsc = rte_eal_wait_lcore(lcore);
diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index ce88a73..d1bf5c5 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -25,13 +25,15 @@
static uint32_t *hash_data[TOTAL_ENTRY];
static volatile uint8_t writer_done;
static volatile uint8_t all_registered;
-static volatile uint32_t thr_id;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
static struct rte_hash *h;
static char hash_name[8];
-static uint64_t updates, checks;
-static uint64_t update_cycles, check_cycles;
+static RTE_ATOMIC(uint64_t) updates;
+static RTE_ATOMIC(uint64_t) checks;
+static RTE_ATOMIC(uint64_t) update_cycles;
+static RTE_ATOMIC(uint64_t) check_cycles;
/* Scale down results to 1000 operations to support lower
* granularity clocks.
@@ -44,7 +46,7 @@
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -81,8 +83,8 @@
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
/* Make the thread offline */
rte_rcu_qsbr_thread_offline(t[0], thread_id);
@@ -113,8 +115,8 @@
} while (loop_cnt < 20000000);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -130,15 +132,15 @@
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
printf("\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\n",
num_cores - 1);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
if (all_registered == 1)
tmp_num_cores = num_cores - 1;
@@ -168,15 +170,16 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -193,10 +196,10 @@
size_t sz;
unsigned int i, tmp_num_cores;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf Test: %d Readers\n", num_cores);
@@ -220,11 +223,11 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -241,10 +244,10 @@
size_t sz;
unsigned int i;
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: %d Writers ('wait' in qsbr_check == false)\n",
num_cores);
@@ -266,10 +269,11 @@
/* Wait until all readers have exited */
rte_eal_mp_wait_lcore();
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -317,8 +321,8 @@
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
rte_rcu_qsbr_thread_unregister(temp, thread_id);
@@ -389,12 +393,12 @@ static struct rte_hash *init_hash(void)
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\n", num_cores);
@@ -453,8 +457,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
@@ -467,12 +471,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -511,7 +515,7 @@ static struct rte_hash *init_hash(void)
printf("Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\n", num_cores);
- __atomic_store_n(&thr_id, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_relaxed);
if (all_registered == 1)
tmp_num_cores = num_cores;
@@ -570,8 +574,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
/* Wait and check return value from reader threads */
@@ -583,12 +587,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -622,10 +626,10 @@ static struct rte_hash *init_hash(void)
return TEST_SKIPPED;
}
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
num_cores = 0;
RTE_LCORE_FOREACH_WORKER(core_id) {
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index d7c5a4c..6d7a0a8 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -186,7 +186,7 @@ struct thread_params {
void *burst = NULL;
#ifdef RTE_USE_C11_MEM_MODEL
- if (__atomic_fetch_add(&lcore_count, 1, __ATOMIC_RELAXED) + 1 != 2)
+ if (rte_atomic_fetch_add_explicit(&lcore_count, 1, rte_memory_order_relaxed) + 1 != 2)
#else
if (__sync_add_and_fetch(&lcore_count, 1) != 2)
#endif
@@ -320,7 +320,7 @@ struct thread_params {
return 0;
}
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static uint64_t queue_count[RTE_MAX_LCORE];
#define TIME_MS 100
@@ -342,7 +342,8 @@ struct thread_params {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (time_diff < hz * TIME_MS / 1000) {
@@ -397,12 +398,12 @@ struct thread_params {
param.r = r;
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lcore_f(&param);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_ring_stress_impl.h b/app/test/test_ring_stress_impl.h
index 202d47d..8b0bfb1 100644
--- a/app/test/test_ring_stress_impl.h
+++ b/app/test/test_ring_stress_impl.h
@@ -24,7 +24,7 @@ enum {
WRK_CMD_RUN,
};
-static alignas(RTE_CACHE_LINE_SIZE) uint32_t wrk_cmd = WRK_CMD_STOP;
+static alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) wrk_cmd = WRK_CMD_STOP;
/* test run-time in seconds */
static const uint32_t run_time = 60;
@@ -203,7 +203,7 @@ struct __rte_cache_aligned ring_elem {
* really releasing any data through 'wrk_cmd' to
* the worker.
*/
- while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) != WRK_CMD_RUN)
+ while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) != WRK_CMD_RUN)
rte_pause();
cl = rte_rdtsc_precise();
@@ -246,7 +246,7 @@ struct __rte_cache_aligned ring_elem {
lcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);
- } while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) == WRK_CMD_RUN);
+ } while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) == WRK_CMD_RUN);
cl = rte_rdtsc_precise() - cl;
if (prcs == 0)
@@ -360,12 +360,12 @@ struct __rte_cache_aligned ring_elem {
}
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_RUN, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_RUN, rte_memory_order_release);
rte_delay_us(run_time * US_PER_S);
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_STOP, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_STOP, rte_memory_order_release);
/* wait for workers and collect stats. */
mc = rte_lcore_id();
diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index e8767da..6777b91 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -35,7 +35,7 @@
static rte_rwlock_t sl;
static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
enum {
LC_TYPE_RDLOCK,
@@ -101,7 +101,8 @@ struct __rte_cache_aligned try_rwlock_lcore {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -134,12 +135,12 @@ struct __rte_cache_aligned try_rwlock_lcore {
printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(NULL);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c
index bab8b0f..c5e5e64 100644
--- a/app/test/test_seqlock.c
+++ b/app/test/test_seqlock.c
@@ -22,7 +22,7 @@ struct __rte_cache_aligned data {
struct reader {
struct data *data;
- uint8_t stop;
+ RTE_ATOMIC(uint8_t) stop;
};
#define WRITER_RUNTIME 2.0 /* s */
@@ -79,7 +79,7 @@ struct reader {
struct reader *r = arg;
int rc = TEST_SUCCESS;
- while (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&
+ while (rte_atomic_load_explicit(&r->stop, rte_memory_order_relaxed) == 0 &&
rc == TEST_SUCCESS) {
struct data *data = r->data;
bool interrupted;
@@ -115,7 +115,7 @@ struct reader {
static void
reader_stop(struct reader *reader)
{
- __atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&reader->stop, 1, rte_memory_order_relaxed);
}
#define NUM_WRITERS 2 /* main lcore + one worker */
diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index c12d52d..010ab82 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -59,15 +59,15 @@ static int32_t dummy_mt_unsafe_cb(void *args)
* test, because two threads are concurrently in a non-MT safe callback.
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so clear pass flag */
*pass_test = 0;
@@ -86,15 +86,15 @@ static int32_t dummy_mt_safe_cb(void *args)
* that 2 threads are running the callback at the same time: MT safe
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so set pass flag */
*pass_test = 1;
@@ -748,15 +748,15 @@ static int32_t dummy_mt_safe_cb(void *args)
/* retrieve done flag and lock to add/sub */
uint32_t *done = &params[0];
- uint32_t *lock = &params[1];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&params[1];
while (!*done) {
- __atomic_fetch_add(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(lock, 1, rte_memory_order_relaxed);
rte_delay_us(500);
- if (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)
+ if (rte_atomic_load_explicit(lock, rte_memory_order_relaxed) > 1)
/* pass: second core has simultaneously incremented */
*done = 1;
- __atomic_fetch_sub(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(lock, 1, rte_memory_order_relaxed);
}
return 0;
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 9a481f2..a29405a 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -48,7 +48,7 @@
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_spinlock_per_core(__rte_unused void *arg)
@@ -110,7 +110,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -149,11 +150,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c
index c5e1caa..3f17a26 100644
--- a/app/test/test_stack_perf.c
+++ b/app/test/test_stack_perf.c
@@ -23,7 +23,7 @@
*/
static volatile unsigned int bulk_sizes[] = {8, MAX_BURST};
-static uint32_t lcore_barrier;
+static RTE_ATOMIC(uint32_t) lcore_barrier;
struct lcore_pair {
unsigned int c1;
@@ -143,8 +143,8 @@ struct thread_args {
s = args->s;
size = args->sz;
- __atomic_fetch_sub(&lcore_barrier, 1, __ATOMIC_RELAXED);
- rte_wait_until_equal_32(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&lcore_barrier, 1, rte_memory_order_relaxed);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&lcore_barrier, 0, rte_memory_order_relaxed);
uint64_t start = rte_rdtsc();
@@ -173,7 +173,7 @@ struct thread_args {
unsigned int i;
for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
- __atomic_store_n(&lcore_barrier, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 2, rte_memory_order_relaxed);
args[0].sz = args[1].sz = bulk_sizes[i];
args[0].s = args[1].s = s;
@@ -206,7 +206,7 @@ struct thread_args {
int cnt = 0;
double avg;
- __atomic_store_n(&lcore_barrier, n, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, n, rte_memory_order_relaxed);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
@@ -300,7 +300,7 @@ struct thread_args {
struct lcore_pair cores;
struct rte_stack *s;
- __atomic_store_n(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 0, rte_memory_order_relaxed);
s = rte_stack_create(STACK_NAME, STACK_SIZE, rte_socket_id(), flags);
if (s == NULL) {
diff --git a/app/test/test_threads.c b/app/test/test_threads.c
index 4ac3f26..6d6881a 100644
--- a/app/test/test_threads.c
+++ b/app/test/test_threads.c
@@ -6,12 +6,13 @@
#include <rte_thread.h>
#include <rte_debug.h>
+#include <rte_stdatomic.h>
#include "test.h"
RTE_LOG_REGISTER(threads_logtype_test, test.threads, INFO);
-static uint32_t thread_id_ready;
+static RTE_ATOMIC(uint32_t) thread_id_ready;
static uint32_t
thread_main(void *arg)
@@ -19,9 +20,9 @@
if (arg != NULL)
*(rte_thread_t *)arg = rte_thread_self();
- __atomic_store_n(&thread_id_ready, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 1, rte_memory_order_release);
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 1)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 1)
;
return 0;
@@ -37,13 +38,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
@@ -61,13 +62,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main,
&thread_main_id) == 0, "Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_detach(thread_id) == 0,
"Failed to detach thread.");
@@ -85,7 +86,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
priority = RTE_THREAD_PRIORITY_NORMAL;
@@ -121,7 +122,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Priority set mismatches priority get");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -137,7 +138,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset0) == 0,
@@ -190,7 +191,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes affinity thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset1) == 0,
@@ -198,7 +199,7 @@
RTE_TEST_ASSERT(memcmp(&cpuset0, &cpuset1, sizeof(rte_cpuset_t)) == 0,
"Failed to apply affinity attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -219,7 +220,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes priority thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_priority(thread_id, &priority) == 0,
@@ -227,7 +228,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Failed to apply priority attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -243,13 +244,13 @@
thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 7a6cb4c..ad4a2d8 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -48,7 +48,7 @@
static rte_ticketlock_recursive_t tlr;
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_ticketlock_per_core(__rte_unused void *arg)
@@ -111,7 +111,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcore_count[lcore] < MAX_LOOP) {
@@ -153,11 +154,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index cac8fc0..dc15a80 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -202,7 +202,7 @@ struct mytimerinfo {
/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
-static uint16_t lcore_state[RTE_MAX_LCORE];
+static RTE_ATOMIC(uint16_t) lcore_state[RTE_MAX_LCORE];
static void
main_init_workers(void)
@@ -210,7 +210,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_WAITING, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_WAITING,
+ rte_memory_order_relaxed);
}
}
@@ -220,10 +221,12 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_RUN_SIGNAL, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL,
+ rte_memory_order_release);
}
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_RUNNING,
+ rte_memory_order_acquire);
}
}
@@ -233,7 +236,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_FINISHED,
+ rte_memory_order_acquire);
}
}
@@ -242,8 +246,10 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- rte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, __ATOMIC_ACQUIRE);
- __atomic_store_n(&lcore_state[lcore_id], WORKER_RUNNING, __ATOMIC_RELEASE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[lcore_id], WORKER_RUN_SIGNAL,
+ rte_memory_order_acquire);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING,
+ rte_memory_order_release);
}
static void
@@ -251,7 +257,8 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- __atomic_store_n(&lcore_state[lcore_id], WORKER_FINISHED, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED,
+ rte_memory_order_release);
}
@@ -277,12 +284,12 @@ struct mytimerinfo {
unsigned int lcore_id = rte_lcore_id();
unsigned int main_lcore = rte_get_main_lcore();
int32_t my_collisions = 0;
- static uint32_t collisions;
+ static RTE_ATOMIC(uint32_t) collisions;
if (lcore_id == main_lcore) {
cb_count = 0;
test_failed = 0;
- __atomic_store_n(&collisions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&collisions, 0, rte_memory_order_relaxed);
timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
if (timers == NULL) {
printf("Test Failed\n");
@@ -310,7 +317,7 @@ struct mytimerinfo {
my_collisions++;
}
if (my_collisions != 0)
- __atomic_fetch_add(&collisions, my_collisions, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&collisions, my_collisions, rte_memory_order_relaxed);
/* wait long enough for timers to expire */
rte_delay_ms(100);
@@ -324,7 +331,7 @@ struct mytimerinfo {
/* now check that we get the right number of callbacks */
if (lcore_id == main_lcore) {
- my_collisions = __atomic_load_n(&collisions, __ATOMIC_RELAXED);
+ my_collisions = rte_atomic_load_explicit(&collisions, rte_memory_order_relaxed);
if (my_collisions != 0)
printf("- %d timer reset collisions (OK)\n", my_collisions);
rte_timer_manage();
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 42/45] app/test-eventdev: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (40 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 41/45] app/test: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 43/45] app/test-crypto-perf: " Tyler Retzlaff
` (3 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
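For reference, the conversion below follows this shape end to end; a minimal
sketch with illustrative names (outstand_pkts_sketch and the two helpers are
made up for this note, not taken from the patch):

#include <stdint.h>
#include <rte_stdatomic.h>

/* Shared outstanding-packet counter; RTE_ATOMIC() marks the type so the
 * rte_atomic_*_explicit() accessors can be used on it.
 */
static RTE_ATOMIC(uint64_t) outstand_pkts_sketch;

/* Worker: one packet fully processed.
 * was: __atomic_fetch_sub(&cnt, 1, __ATOMIC_RELAXED)
 */
static inline void
worker_retire_pkt(void)
{
	rte_atomic_fetch_sub_explicit(&outstand_pkts_sketch, 1,
			rte_memory_order_relaxed);
}

/* Dequeue loop: stop once everything has drained.
 * was: __atomic_load_n(&cnt, __ATOMIC_RELAXED) <= 0
 */
static inline int
all_pkts_drained(void)
{
	return rte_atomic_load_explicit(&outstand_pkts_sketch,
			rte_memory_order_relaxed) == 0;
}

The memory orderings are unchanged by the conversion; only the spelling of the
type and of the accessors moves to the rte_ prefixed API.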
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-eventdev/test_order_atq.c | 4 ++--
app/test-eventdev/test_order_common.c | 5 +++--
app/test-eventdev/test_order_common.h | 8 ++++----
app/test-eventdev/test_order_queue.c | 4 ++--
app/test-eventdev/test_perf_common.h | 6 +++---
5 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 2fee4b4..128d3f2 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index a9894c6..0fceace 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -189,7 +189,7 @@
evt_err("failed to allocate t->expected_flow_seq memory");
goto exp_nomem;
}
- __atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts, rte_memory_order_relaxed);
t->err = false;
t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
@@ -296,7 +296,8 @@
while (t->err == false) {
uint64_t new_cycles = rte_get_timer_cycles();
- int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
+ int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
+ rte_memory_order_relaxed);
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index d4cbc5c..7177fd8 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -48,7 +48,7 @@ struct __rte_cache_aligned test_order {
* The atomic_* is an expensive operation,Since it is a functional test,
* We are using the atomic_ operation to reduce the code complexity.
*/
- uint64_t outstand_pkts;
+ RTE_ATOMIC(uint64_t) outstand_pkts;
enum evt_test_result result;
uint32_t nb_flows;
uint64_t nb_pkts;
@@ -95,7 +95,7 @@ struct __rte_cache_aligned test_order {
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
- uint64_t *const outstand_pkts)
+ RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
/* compare the seqn against expected value */
@@ -113,7 +113,7 @@ struct __rte_cache_aligned test_order {
*/
expected_flow_seq[flow]++;
rte_pktmbuf_free(ev->mbuf);
- __atomic_fetch_sub(outstand_pkts, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -132,7 +132,7 @@ struct __rte_cache_aligned test_order {
const uint8_t port = w->port_id;\
const uint32_t nb_flows = t->nb_flows;\
uint32_t *expected_flow_seq = t->expected_flow_seq;\
- uint64_t *outstand_pkts = &t->outstand_pkts;\
+ RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
if (opt->verbose_level > 1)\
printf("%s(): lcore %d dev_id %d port=%d\n",\
__func__, rte_lcore_id(), dev_id, port)
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 80eaea5..a282ab2 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index bc627de..d60b873 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -225,7 +225,7 @@ struct __rte_cache_aligned perf_elt {
* stored before updating the number of
* processed packets for worker lcores
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -270,7 +270,7 @@ struct __rte_cache_aligned perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -325,7 +325,7 @@ struct __rte_cache_aligned perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts += vec->nb_elem;
if (enable_fwd_latency) {
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 43/45] app/test-crypto-perf: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (41 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 42/45] app/test-eventdev: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 44/45] app/test-compress-perf: " Tyler Retzlaff
` (2 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
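The "display once" guard touched in each of these files follows the pattern
sketched below; only the first lcore to win the compare-exchange prints the
header (names are illustrative, not code from the patch):

#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) display_once_sketch;

static void
print_csv_header_once(void)
{
	uint16_t exp = 0;

	/* was: __atomic_compare_exchange_n(&flag, &exp, 1, 0,
	 *		__ATOMIC_RELAXED, __ATOMIC_RELAXED)
	 */
	if (rte_atomic_compare_exchange_strong_explicit(&display_once_sketch,
			&exp, 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
		printf("# lcore id, Buffer Size, Burst Size, ...\n");
}

The strong variant stands in for the builtin's weak=0 argument, and the two
trailing arguments remain the success and failure orderings.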
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-crypto-perf/cperf_test_latency.c | 6 +++---
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +++++-----
app/test-crypto-perf/cperf_test_throughput.c | 10 +++++-----
app/test-crypto-perf/cperf_test_verify.c | 10 +++++-----
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 99b7d7c..b8ad6bf 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -136,7 +136,7 @@ struct priv_op_data {
uint32_t imix_idx = 0;
int ret = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
if (ctx == NULL)
return 0;
@@ -341,8 +341,8 @@ struct priv_op_data {
uint16_t exp = 0;
if (ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
"cycles, time (us)");
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 4a60f6d..7191d99 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -396,7 +396,7 @@ struct pmd_cyclecount_state {
state.lcore = rte_lcore_id();
state.linearize = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
static bool warmup = true;
/*
@@ -443,8 +443,8 @@ struct pmd_cyclecount_state {
uint16_t exp = 0;
if (!opts->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
@@ -460,8 +460,8 @@ struct pmd_cyclecount_state {
state.cycles_per_enq,
state.cycles_per_deq);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index e3d266d..c0891e7 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -107,7 +107,7 @@ struct cperf_throughput_ctx {
uint8_t burst_size_idx = 0;
uint32_t imix_idx = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
@@ -277,8 +277,8 @@ struct cperf_throughput_ctx {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst Size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -298,8 +298,8 @@ struct cperf_throughput_ctx {
throughput_gbps,
cycles_per_packet);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 3548509..222c7a1 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -216,7 +216,7 @@ struct cperf_op_result {
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
uint64_t ops_failed = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint64_t i;
uint16_t ops_unused = 0;
@@ -370,8 +370,8 @@ struct cperf_op_result {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -388,8 +388,8 @@ struct cperf_op_result {
ops_deqd_failed,
ops_failed);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore id, Buffer Size(B), "
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Failed Ops\n");
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 44/45] app/test-compress-perf: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (42 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 43/45] app/test-crypto-perf: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-06 17:58 ` [PATCH v5 45/45] app/test-bbdev: " Tyler Retzlaff
2024-05-07 2:19 ` [PATCH v5 00/45] use " Patrick Robb
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
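One detail these hunks rely on: on failure, compare-exchange writes the
currently observed value back into the expected variable, which is why exp is
reset to 0 before it is reused for the next one-shot guard. A sketch of that
usage with illustrative names (not code from the patch):

#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint16_t) print_info_once_sketch;
static RTE_ATOMIC(uint16_t) display_hdr_once_sketch;

static void
report_once(unsigned int lcore)
{
	uint16_t exp = 0;

	/* One-shot per-run banner. */
	if (rte_atomic_compare_exchange_strong_explicit(&print_info_once_sketch,
			&exp, 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
		printf(" lcore: %u\n", lcore);

	/* A failed exchange above left the observed value in exp,
	 * so reset it before guarding the next print.
	 */
	exp = 0;
	if (rte_atomic_compare_exchange_strong_explicit(&display_hdr_once_sketch,
			&exp, 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
		printf("%12s%6s%12s\n", "lcore id", "Level", "Comp size");
}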
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 ++--
app/test-compress-perf/comp_perf_test_throughput.c | 10 +++++-----
app/test-compress-perf/comp_perf_test_verify.c | 6 +++---
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/app/test-compress-perf/comp_perf_test_common.h b/app/test-compress-perf/comp_perf_test_common.h
index d039e5a..085e269 100644
--- a/app/test-compress-perf/comp_perf_test_common.h
+++ b/app/test-compress-perf/comp_perf_test_common.h
@@ -14,7 +14,7 @@ struct cperf_mem_resources {
uint16_t qp_id;
uint8_t lcore_id;
- uint16_t print_info_once;
+ RTE_ATOMIC(uint16_t) print_info_once;
uint32_t total_bufs;
uint8_t *compressed_data;
diff --git a/app/test-compress-perf/comp_perf_test_cyclecount.c b/app/test-compress-perf/comp_perf_test_cyclecount.c
index 4d336ec..64e8faa 100644
--- a/app/test-compress-perf/comp_perf_test_cyclecount.c
+++ b/app/test-compress-perf/comp_perf_test_cyclecount.c
@@ -498,8 +498,8 @@ struct cperf_cyclecount_ctx {
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
diff --git a/app/test-compress-perf/comp_perf_test_throughput.c b/app/test-compress-perf/comp_perf_test_throughput.c
index 1f7072d..089d19c 100644
--- a/app/test-compress-perf/comp_perf_test_throughput.c
+++ b/app/test-compress-perf/comp_perf_test_throughput.c
@@ -336,7 +336,7 @@
struct cperf_benchmark_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->ver.options;
uint32_t lcore = rte_lcore_id();
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
int i, ret = EXIT_SUCCESS;
ctx->ver.mem.lcore_id = lcore;
@@ -345,8 +345,8 @@
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
@@ -413,8 +413,8 @@
}
exp = 0;
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("\n%12s%6s%12s%17s%15s%16s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]",
"Comp [Gbps]", "Decomp [Gbps]");
diff --git a/app/test-compress-perf/comp_perf_test_verify.c b/app/test-compress-perf/comp_perf_test_verify.c
index 7bd1807..09d97c5 100644
--- a/app/test-compress-perf/comp_perf_test_verify.c
+++ b/app/test-compress-perf/comp_perf_test_verify.c
@@ -396,7 +396,7 @@
struct cperf_verify_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->options;
int ret = EXIT_SUCCESS;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint32_t lcore = rte_lcore_id();
uint16_t exp = 0;
@@ -452,8 +452,8 @@
test_data->input_data_sz * 100;
if (!ctx->silent) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("%12s%6s%12s%17s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]");
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v5 45/45] app/test-bbdev: use rte stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (43 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 44/45] app/test-compress-perf: " Tyler Retzlaff
@ 2024-05-06 17:58 ` Tyler Retzlaff
2024-05-07 2:19 ` [PATCH v5 00/45] use " Patrick Robb
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-06 17:58 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
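Where these tests spin on a synchronization flag, the store side uses
rte_atomic_store_explicit() while the spin side uses rte_wait_until_equal_16();
the (uint16_t *)(uintptr_t) cast drops the RTE_ATOMIC() qualifier, on the
assumption that the wait helper still takes a plain uint16_t pointer. A minimal
sketch of that handshake (illustrative names, not code from the patch):

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

#define SYNC_WAIT	0
#define SYNC_START	1

static RTE_ATOMIC(uint16_t) sync_flag_sketch = SYNC_WAIT;

/* Main lcore: release the workers. */
static void
release_workers(void)
{
	rte_atomic_store_explicit(&sync_flag_sketch, SYNC_START,
			rte_memory_order_relaxed);
}

/* Worker lcore: spin until released; the cast strips the atomic
 * qualifier for the wait helper.
 */
static void
wait_for_release(void)
{
	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&sync_flag_sketch,
			SYNC_START, rte_memory_order_relaxed);
}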
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++++++++++++----------------
1 file changed, 110 insertions(+), 73 deletions(-)
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index dcce00a..9694ed3 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -144,7 +144,7 @@ struct test_op_params {
uint16_t num_to_process;
uint16_t num_lcores;
int vector_mask;
- uint16_t sync;
+ RTE_ATOMIC(uint16_t) sync;
struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
@@ -159,9 +159,9 @@ struct thread_params {
uint8_t iter_count;
double iter_average;
double bler;
- uint16_t nb_dequeued;
- int16_t processing_status;
- uint16_t burst_sz;
+ RTE_ATOMIC(uint16_t) nb_dequeued;
+ RTE_ATOMIC(int16_t) processing_status;
+ RTE_ATOMIC(uint16_t) burst_sz;
struct test_op_params *op_params;
struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
@@ -3195,56 +3195,64 @@ typedef int (test_case_function)(struct active_device *ad,
}
if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
printf(
"Dequeue interrupt handler called for incorrect event!\n");
return;
}
- burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);
+ burst_sz = rte_atomic_load_explicit(&tp->burst_sz, rte_memory_order_relaxed);
num_ops = tp->op_params->num_to_process;
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
deq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,
&tp->fft_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
deq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
&tp->mldts_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else /*RTE_BBDEV_OP_TURBO_ENC*/
deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
if (deq < burst_sz) {
printf(
"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
burst_sz, deq);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
- if (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) + deq < num_ops) {
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
return;
}
@@ -3288,7 +3296,8 @@ typedef int (test_case_function)(struct active_device *ad,
if (ret) {
printf("Buffers validation failed\n");
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
}
switch (test_vector.op_type) {
@@ -3315,7 +3324,8 @@ typedef int (test_case_function)(struct active_device *ad,
break;
default:
printf("Unknown op type: %d\n", test_vector.op_type);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
@@ -3324,7 +3334,7 @@ typedef int (test_case_function)(struct active_device *ad,
tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
((double)total_time / (double)rte_get_tsc_hz());
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
}
static int
@@ -3362,10 +3372,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3415,15 +3426,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3459,10 +3472,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3506,15 +3520,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3549,10 +3565,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3592,15 +3609,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3636,10 +3655,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3681,15 +3701,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3725,10 +3747,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3769,15 +3792,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3811,10 +3836,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_to_process);
@@ -3851,15 +3877,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3894,7 +3922,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4013,7 +4042,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4148,7 +4178,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4271,7 +4302,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4402,7 +4434,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4503,7 +4536,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4604,7 +4638,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4702,7 +4737,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4898,7 +4934,7 @@ typedef int (test_case_function)(struct active_device *ad,
else
return TEST_SKIPPED;
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -4921,7 +4957,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = bler_function(&t_params[0]);
/* Main core is always used */
@@ -5024,7 +5060,7 @@ typedef int (test_case_function)(struct active_device *ad,
throughput_function = throughput_pmd_lcore_enc;
}
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -5047,7 +5083,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = throughput_function(&t_params[0]);
/* Main core is always used */
@@ -5077,29 +5113,30 @@ typedef int (test_case_function)(struct active_device *ad,
* Wait for main lcore operations.
*/
tp = &t_params[0];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+ (rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed) !=
TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed);
/* Wait for worker lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp = &t_params[used_cores];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
- TEST_FAILED))
+ (rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed) != TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed);
}
/* Print throughput if test passed */
--
1.8.3.1
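
The repeated edits above in test_bbdev_perf.c all follow one pattern: the fields shared between the enqueuing lcore and the dequeue interrupt handler (nb_dequeued, burst_sz, processing_status, op_params->sync) become RTE_ATOMIC() types accessed only through the rte_atomic_*_explicit() macros with relaxed ordering, while rte_wait_until_equal_16() now takes an rte_memory_order but still expects a plain uint16_t pointer, which is why the (uint16_t *)(uintptr_t) cast appears. A minimal sketch of that pattern, using an illustrative worker_sync struct rather than the test's real thread_params:

#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_pause.h>

struct worker_sync {
        RTE_ATOMIC(uint16_t) nb_dequeued; /* progress counter, relaxed accesses only */
};

/* Enqueuing side: wait until the previous batch is fully dequeued.
 * The cast drops the atomic qualifier because rte_wait_until_equal_16()
 * takes a plain uint16_t pointer.
 */
static void
wait_for_batch(struct worker_sync *w, uint16_t enqueued)
{
        rte_wait_until_equal_16((uint16_t *)(uintptr_t)&w->nb_dequeued,
                        enqueued, rte_memory_order_relaxed);
}

/* Dequeuing side: equivalent of the old __atomic_fetch_add(..., __ATOMIC_RELAXED). */
static void
account_dequeued(struct worker_sync *w, uint16_t deq)
{
        rte_atomic_fetch_add_explicit(&w->nb_dequeued, deq,
                        rte_memory_order_relaxed);
}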
^ permalink raw reply [flat|nested] 300+ messages in thread
* Re: [PATCH v5 00/45] use stdatomic API
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
` (44 preceding siblings ...)
2024-05-06 17:58 ` [PATCH v5 45/45] app/test-bbdev: " Tyler Retzlaff
@ 2024-05-07 2:19 ` Patrick Robb
45 siblings, 0 replies; 300+ messages in thread
From: Patrick Robb @ 2024-05-07 2:19 UTC (permalink / raw)
To: Tyler Retzlaff
Cc: dev, Mattias Rönnblom, Morten Brørup,
Abdullah Sevincer, Ajit Khaparde, Alok Prasad, Anatoly Burakov,
Andrew Rybchenko, Anoob Joseph, Bruce Richardson, Byron Marohn,
Chenbo Xia, Chengwen Feng, Ciara Loftus, Ciara Power,
Dariusz Sosnowski, David Hunt, Devendra Singh Rawat,
Erik Gabriel Carrillo, Guoyang Zhou, Harman Kalra,
Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li,
Maciej Czekaj, Matan Azrad, Maxime Coquelin, Nicolas Chautru,
Ori Kam, Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy,
Reshma Pattan, Rosen Xu, Ruifeng Wang, Rushil Gupta,
Sameh Gobriel, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang,
Yipeng Wang, Yisen Zhuang, Ziyang Xuan
Recheck-request: iol-broadcom-Performance
On Mon, May 6, 2024 at 1:58 PM Tyler Retzlaff
<roretzla@linux.microsoft.com> wrote:
>
> This series converts all non-generic builtin atomics to use the rte_atomic
> macros that allow optional enablement of standard C11 atomics.
>
> Use of generic atomics for non-scalar types is not converted in this
> change and will be evaluated as part of a separate series.
> Specifically, conversion of lib/lpm and drivers/x/cnxk will be addressed
> in a separate series to handle the use of generics.
>
> v5:
> * add missing RTE_ATOMIC for ``struct channel_info.status`` field.
>
> v4:
> * rebase after merge of move alignment attribute on types for MSVC,
> no other changes.
>
> v3:
> * event/dsw, wrap all lines <= 80 chars, align arguments to
> opening parenthesis.
> * event/dlb2, wrap changed lines <= 80 chars, remove comments
> referencing gcc __atomic built-ins.
> * bus/vmbus, remove comment referencing gcc atomic built-ins,
> fix mistake where monitor_mask was declared RTE_ATOMIC(uint32_t),
> fix mistake where pending was not declared RTE_ATOMIC(uint32_t),
> remove now unnecessary cast to __rte_atomic of pending (since
> the field is now properly declared RTE_ATOMIC).
>
> v2:
> * drop the net/sfc driver from the series. the sfc driver
> uses generic __atomic_store not handled by the current macros.
> the cases where generic __atomic_xxx are used on objects that
> can't be accepted by __atomic_xxx_n will be addressed in a
> separate series.
>
> Tyler Retzlaff (45):
> net/mlx5: use rte stdatomic API
> net/ixgbe: use rte stdatomic API
> net/iavf: use rte stdatomic API
> net/ice: use rte stdatomic API
> net/i40e: use rte stdatomic API
> net/hns3: use rte stdatomic API
> net/bnxt: use rte stdatomic API
> net/cpfl: use rte stdatomic API
> net/af_xdp: use rte stdatomic API
> net/octeon_ep: use rte stdatomic API
> net/octeontx: use rte stdatomic API
> net/cxgbe: use rte stdatomic API
> net/gve: use rte stdatomic API
> net/memif: use rte stdatomic API
> net/thunderx: use rte stdatomic API
> net/virtio: use rte stdatomic API
> net/hinic: use rte stdatomic API
> net/idpf: use rte stdatomic API
> net/qede: use rte stdatomic API
> net/ring: use rte stdatomic API
> vdpa/mlx5: use rte stdatomic API
> raw/ifpga: use rte stdatomic API
> event/opdl: use rte stdatomic API
> event/octeontx: use rte stdatomic API
> event/dsw: use rte stdatomic API
> dma/skeleton: use rte stdatomic API
> crypto/octeontx: use rte stdatomic API
> common/mlx5: use rte stdatomic API
> common/idpf: use rte stdatomic API
> common/iavf: use rte stdatomic API
> baseband/acc: use rte stdatomic API
> net/txgbe: use rte stdatomic API
> net/null: use rte stdatomic API
> event/dlb2: use rte stdatomic API
> dma/idxd: use rte stdatomic API
> crypto/ccp: use rte stdatomic API
> common/cpt: use rte stdatomic API
> bus/vmbus: use rte stdatomic API
> examples: use rte stdatomic API
> app/dumpcap: use rte stdatomic API
> app/test: use rte stdatomic API
> app/test-eventdev: use rte stdatomic API
> app/test-crypto-perf: use rte stdatomic API
> app/test-compress-perf: use rte stdatomic API
> app/test-bbdev: use rte stdatomic API
>
> app/dumpcap/main.c | 12 +-
> app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++--------
> app/test-compress-perf/comp_perf_test_common.h | 2 +-
> app/test-compress-perf/comp_perf_test_cyclecount.c | 4 +-
> app/test-compress-perf/comp_perf_test_throughput.c | 10 +-
> app/test-compress-perf/comp_perf_test_verify.c | 6 +-
> app/test-crypto-perf/cperf_test_latency.c | 6 +-
> app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +-
> app/test-crypto-perf/cperf_test_throughput.c | 10 +-
> app/test-crypto-perf/cperf_test_verify.c | 10 +-
> app/test-eventdev/test_order_atq.c | 4 +-
> app/test-eventdev/test_order_common.c | 5 +-
> app/test-eventdev/test_order_common.h | 8 +-
> app/test-eventdev/test_order_queue.c | 4 +-
> app/test-eventdev/test_perf_common.h | 6 +-
> app/test/test_bpf.c | 46 ++++--
> app/test/test_distributor.c | 114 ++++++-------
> app/test/test_distributor_perf.c | 4 +-
> app/test/test_func_reentrancy.c | 28 ++--
> app/test/test_hash_multiwriter.c | 16 +-
> app/test/test_hash_readwrite.c | 74 ++++-----
> app/test/test_hash_readwrite_lf_perf.c | 88 +++++-----
> app/test/test_lcores.c | 25 +--
> app/test/test_lpm_perf.c | 14 +-
> app/test/test_mcslock.c | 12 +-
> app/test/test_mempool_perf.c | 9 +-
> app/test/test_pflock.c | 13 +-
> app/test/test_pmd_perf.c | 10 +-
> app/test/test_rcu_qsbr_perf.c | 114 ++++++-------
> app/test/test_ring_perf.c | 11 +-
> app/test/test_ring_stress_impl.h | 10 +-
> app/test/test_rwlock.c | 9 +-
> app/test/test_seqlock.c | 6 +-
> app/test/test_service_cores.c | 24 +--
> app/test/test_spinlock.c | 9 +-
> app/test/test_stack_perf.c | 12 +-
> app/test/test_threads.c | 33 ++--
> app/test/test_ticketlock.c | 9 +-
> app/test/test_timer.c | 31 ++--
> drivers/baseband/acc/rte_acc100_pmd.c | 36 ++--
> drivers/baseband/acc/rte_vrb_pmd.c | 46 ++++--
> drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
> drivers/bus/vmbus/vmbus_channel.c | 8 +-
> drivers/common/cpt/cpt_common.h | 2 +-
> drivers/common/iavf/iavf_impl.c | 4 +-
> drivers/common/idpf/idpf_common_device.h | 6 +-
> drivers/common/idpf/idpf_common_rxtx.c | 14 +-
> drivers/common/idpf/idpf_common_rxtx.h | 2 +-
> drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 +-
> drivers/common/mlx5/linux/mlx5_nl.c | 5 +-
> drivers/common/mlx5/mlx5_common.h | 2 +-
> drivers/common/mlx5/mlx5_common_mr.c | 16 +-
> drivers/common/mlx5/mlx5_common_mr.h | 2 +-
> drivers/common/mlx5/mlx5_common_utils.c | 32 ++--
> drivers/common/mlx5/mlx5_common_utils.h | 6 +-
> drivers/common/mlx5/mlx5_malloc.c | 58 +++----
> drivers/crypto/ccp/ccp_dev.c | 8 +-
> drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 +-
> drivers/dma/idxd/idxd_internal.h | 2 +-
> drivers/dma/idxd/idxd_pci.c | 9 +-
> drivers/dma/skeleton/skeleton_dmadev.c | 5 +-
> drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
> drivers/event/dlb2/dlb2.c | 34 ++--
> drivers/event/dlb2/dlb2_priv.h | 13 +-
> drivers/event/dlb2/dlb2_xstats.c | 2 +-
> drivers/event/dsw/dsw_evdev.h | 6 +-
> drivers/event/dsw/dsw_event.c | 47 ++++--
> drivers/event/dsw/dsw_xstats.c | 4 +-
> drivers/event/octeontx/timvf_evdev.h | 8 +-
> drivers/event/octeontx/timvf_worker.h | 36 ++--
> drivers/event/opdl/opdl_ring.c | 80 ++++-----
> drivers/net/af_xdp/rte_eth_af_xdp.c | 20 ++-
> drivers/net/bnxt/bnxt_cpr.h | 4 +-
> drivers/net/bnxt/bnxt_rxq.h | 2 +-
> drivers/net/bnxt/bnxt_rxr.c | 13 +-
> drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
> drivers/net/bnxt/bnxt_stats.c | 4 +-
> drivers/net/cpfl/cpfl_ethdev.c | 8 +-
> drivers/net/cxgbe/clip_tbl.c | 12 +-
> drivers/net/cxgbe/clip_tbl.h | 2 +-
> drivers/net/cxgbe/cxgbe_main.c | 20 +--
> drivers/net/cxgbe/cxgbe_ofld.h | 6 +-
> drivers/net/cxgbe/l2t.c | 12 +-
> drivers/net/cxgbe/l2t.h | 2 +-
> drivers/net/cxgbe/mps_tcam.c | 21 +--
> drivers/net/cxgbe/mps_tcam.h | 2 +-
> drivers/net/cxgbe/smt.c | 12 +-
> drivers/net/cxgbe/smt.h | 2 +-
> drivers/net/gve/base/gve_osdep.h | 4 +-
> drivers/net/hinic/hinic_pmd_rx.c | 2 +-
> drivers/net/hinic/hinic_pmd_rx.h | 2 +-
> drivers/net/hns3/hns3_cmd.c | 18 +-
> drivers/net/hns3/hns3_dcb.c | 2 +-
> drivers/net/hns3/hns3_ethdev.c | 36 ++--
> drivers/net/hns3/hns3_ethdev.h | 32 ++--
> drivers/net/hns3/hns3_ethdev_vf.c | 60 +++----
> drivers/net/hns3/hns3_intr.c | 36 ++--
> drivers/net/hns3/hns3_intr.h | 4 +-
> drivers/net/hns3/hns3_mbx.c | 6 +-
> drivers/net/hns3/hns3_mp.c | 6 +-
> drivers/net/hns3/hns3_rxtx.c | 10 +-
> drivers/net/hns3/hns3_tm.c | 4 +-
> drivers/net/i40e/i40e_ethdev.c | 4 +-
> drivers/net/i40e/i40e_rxtx.c | 6 +-
> drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
> drivers/net/iavf/iavf.h | 16 +-
> drivers/net/iavf/iavf_rxtx.c | 4 +-
> drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
> drivers/net/iavf/iavf_vchnl.c | 14 +-
> drivers/net/ice/base/ice_osdep.h | 4 +-
> drivers/net/ice/ice_dcf.c | 6 +-
> drivers/net/ice/ice_dcf.h | 2 +-
> drivers/net/ice/ice_dcf_ethdev.c | 8 +-
> drivers/net/ice/ice_dcf_parent.c | 16 +-
> drivers/net/ice/ice_ethdev.c | 12 +-
> drivers/net/ice/ice_ethdev.h | 2 +-
> drivers/net/idpf/idpf_ethdev.c | 7 +-
> drivers/net/ixgbe/ixgbe_ethdev.c | 14 +-
> drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
> drivers/net/ixgbe/ixgbe_rxtx.c | 4 +-
> drivers/net/memif/memif.h | 4 +-
> drivers/net/memif/rte_eth_memif.c | 50 +++---
> drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
> drivers/net/mlx5/linux/mlx5_verbs.c | 9 +-
> drivers/net/mlx5/mlx5.c | 9 +-
> drivers/net/mlx5/mlx5.h | 66 ++++----
> drivers/net/mlx5/mlx5_flow.c | 37 +++--
> drivers/net/mlx5/mlx5_flow.h | 8 +-
> drivers/net/mlx5/mlx5_flow_aso.c | 43 +++--
> drivers/net/mlx5/mlx5_flow_dv.c | 126 +++++++-------
> drivers/net/mlx5/mlx5_flow_flex.c | 14 +-
> drivers/net/mlx5/mlx5_flow_hw.c | 61 +++----
> drivers/net/mlx5/mlx5_flow_meter.c | 30 ++--
> drivers/net/mlx5/mlx5_flow_quota.c | 32 ++--
> drivers/net/mlx5/mlx5_hws_cnt.c | 71 ++++----
> drivers/net/mlx5/mlx5_hws_cnt.h | 10 +-
> drivers/net/mlx5/mlx5_rx.h | 14 +-
> drivers/net/mlx5/mlx5_rxq.c | 30 ++--
> drivers/net/mlx5/mlx5_trigger.c | 2 +-
> drivers/net/mlx5/mlx5_tx.h | 18 +-
> drivers/net/mlx5/mlx5_txpp.c | 84 +++++-----
> drivers/net/mlx5/mlx5_txq.c | 12 +-
> drivers/net/mlx5/mlx5_utils.c | 10 +-
> drivers/net/mlx5/mlx5_utils.h | 4 +-
> drivers/net/null/rte_eth_null.c | 12 +-
> drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +-
> drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +-
> drivers/net/octeon_ep/cnxk_ep_vf.c | 8 +-
> drivers/net/octeon_ep/otx2_ep_vf.c | 8 +-
> drivers/net/octeon_ep/otx_ep_common.h | 4 +-
> drivers/net/octeon_ep/otx_ep_rxtx.c | 6 +-
> drivers/net/octeontx/octeontx_ethdev.c | 8 +-
> drivers/net/qede/base/bcm_osal.c | 6 +-
> drivers/net/ring/rte_eth_ring.c | 8 +-
> drivers/net/thunderx/nicvf_rxtx.c | 9 +-
> drivers/net/thunderx/nicvf_struct.h | 4 +-
> drivers/net/txgbe/txgbe_ethdev.c | 12 +-
> drivers/net/txgbe/txgbe_ethdev.h | 2 +-
> drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
> drivers/net/virtio/virtio_ring.h | 4 +-
> drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 +-
> drivers/net/virtio/virtqueue.h | 32 ++--
> drivers/raw/ifpga/ifpga_rawdev.c | 9 +-
> drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +--
> drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +-
> drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++---
> drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 +-
> drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 +-
> drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 +-
> examples/bbdev_app/main.c | 13 +-
> examples/l2fwd-event/l2fwd_common.h | 4 +-
> examples/l2fwd-event/l2fwd_event.c | 24 +--
> examples/l2fwd-jobstats/main.c | 11 +-
> .../client_server_mp/mp_server/main.c | 6 +-
> examples/server_node_efd/efd_server/main.c | 6 +-
> examples/vhost/main.c | 32 ++--
> examples/vhost/main.h | 4 +-
> examples/vhost/virtio_net.c | 13 +-
> examples/vhost_blk/vhost_blk.c | 8 +-
> examples/vm_power_manager/channel_manager.h | 4 +-
> examples/vm_power_manager/channel_monitor.c | 9 +-
> examples/vm_power_manager/vm_power_cli.c | 3 +-
> 182 files changed, 1646 insertions(+), 1502 deletions(-)
>
> --
> 1.8.3.1
>
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v6 00/45] use stdatomic API
2024-03-20 20:50 [PATCH 00/46] use stdatomic API Tyler Retzlaff
` (50 preceding siblings ...)
2024-05-06 17:57 ` [PATCH v5 00/45] use " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 01/45] net/mlx5: use rte " Tyler Retzlaff
` (45 more replies)
51 siblings, 46 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
This series converts all non-generic builtin atomics to use the rte_atomic
macros that allow optional enablement of standard C11 atomics.
Use of generic atomics for non-scalar types is not converted in this
change and will be evaluated as part of a separate series.
Specifically, conversion of lib/lpm and drivers/x/cnxk will be addressed
in a separate series to handle the use of generics.
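
In practice each conversion is mechanical: the variable gains an RTE_ATOMIC() specifier and every __atomic_xxx(..., __ATOMIC_yyy) call becomes rte_atomic_xxx_explicit(..., rte_memory_order_yyy); with the stdatomic build option (RTE_ENABLE_STDATOMIC) the macros expand to C11 atomics, otherwise they fall back to the gcc builtins. A small sketch of the typical reference-count case (the names below are illustrative, not taken from any driver):

#include <stdbool.h>
#include <rte_stdatomic.h>

/* before: static uint32_t refcnt; __atomic_fetch_add(&refcnt, 1, __ATOMIC_RELAXED); */
static RTE_ATOMIC(uint32_t) refcnt;

static inline void
obj_get(void)
{
        rte_atomic_fetch_add_explicit(&refcnt, 1, rte_memory_order_relaxed);
}

/* Returns true when the last reference is dropped, mirroring the
 * "fetch_sub(...) - 1 == 0" checks used throughout the series.
 */
static inline bool
obj_put(void)
{
        return rte_atomic_fetch_sub_explicit(&refcnt, 1,
                        rte_memory_order_relaxed) - 1 == 0;
}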
v6:
* convert 4 spaces to 2 tabs in line-continuations exceeding 100
columns in baseband/acc/rte_vrb_pmd.c.
v5:
* add missing RTE_ATOMIC for ``struct channel_info.status`` field.
v4:
* rebase after merge of move alignment attribute on types for MSVC,
no other changes.
v3:
* event/dsw, wrap all lines <= 80 chars, align arguments to
opening parenthesis.
* event/dlb2, wrap changed lines <= 80 chars, remove comments
referencing gcc __atomic built-ins.
* bus/vmbus, remove comment referencing gcc atomic built-ins,
fix mistake where monitor_mask was declared RTE_ATOMIC(uint32_t),
fix mistake where pending was not declared RTE_ATOMIC(uint32_t),
remove now unnecessary cast to __rte_atomic of pending (since
the field is now properly declared RTE_ATOMIC).
v2:
* drop the net/sfc driver from the series. the sfc driver
uses generic __atomic_store not handled by the current macros.
the cases where generic __atomic_xxx are used on objects that
can't be accepted by __atomic_xxx_n will be addressed in a
separate series.
Tyler Retzlaff (45):
net/mlx5: use rte stdatomic API
net/ixgbe: use rte stdatomic API
net/iavf: use rte stdatomic API
net/ice: use rte stdatomic API
net/i40e: use rte stdatomic API
net/hns3: use rte stdatomic API
net/bnxt: use rte stdatomic API
net/cpfl: use rte stdatomic API
net/af_xdp: use rte stdatomic API
net/octeon_ep: use rte stdatomic API
net/octeontx: use rte stdatomic API
net/cxgbe: use rte stdatomic API
net/gve: use rte stdatomic API
net/memif: use rte stdatomic API
net/thunderx: use rte stdatomic API
net/virtio: use rte stdatomic API
net/hinic: use rte stdatomic API
net/idpf: use rte stdatomic API
net/qede: use rte stdatomic API
net/ring: use rte stdatomic API
vdpa/mlx5: use rte stdatomic API
raw/ifpga: use rte stdatomic API
event/opdl: use rte stdatomic API
event/octeontx: use rte stdatomic API
event/dsw: use rte stdatomic API
dma/skeleton: use rte stdatomic API
crypto/octeontx: use rte stdatomic API
common/mlx5: use rte stdatomic API
common/idpf: use rte stdatomic API
common/iavf: use rte stdatomic API
baseband/acc: use rte stdatomic API
net/txgbe: use rte stdatomic API
net/null: use rte stdatomic API
event/dlb2: use rte stdatomic API
dma/idxd: use rte stdatomic API
crypto/ccp: use rte stdatomic API
common/cpt: use rte stdatomic API
bus/vmbus: use rte stdatomic API
examples: use rte stdatomic API
app/dumpcap: use rte stdatomic API
app/test: use rte stdatomic API
app/test-eventdev: use rte stdatomic API
app/test-crypto-perf: use rte stdatomic API
app/test-compress-perf: use rte stdatomic API
app/test-bbdev: use rte stdatomic API
app/dumpcap/main.c | 12 +-
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++--------
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 +-
app/test-compress-perf/comp_perf_test_throughput.c | 10 +-
app/test-compress-perf/comp_perf_test_verify.c | 6 +-
app/test-crypto-perf/cperf_test_latency.c | 6 +-
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +-
app/test-crypto-perf/cperf_test_throughput.c | 10 +-
app/test-crypto-perf/cperf_test_verify.c | 10 +-
app/test-eventdev/test_order_atq.c | 4 +-
app/test-eventdev/test_order_common.c | 5 +-
app/test-eventdev/test_order_common.h | 8 +-
app/test-eventdev/test_order_queue.c | 4 +-
app/test-eventdev/test_perf_common.h | 6 +-
app/test/test_bpf.c | 46 ++++--
app/test/test_distributor.c | 114 ++++++-------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++--
app/test/test_hash_multiwriter.c | 16 +-
app/test/test_hash_readwrite.c | 74 ++++-----
app/test/test_hash_readwrite_lf_perf.c | 88 +++++-----
app/test/test_lcores.c | 25 +--
app/test/test_lpm_perf.c | 14 +-
app/test/test_mcslock.c | 12 +-
app/test/test_mempool_perf.c | 9 +-
app/test/test_pflock.c | 13 +-
app/test/test_pmd_perf.c | 10 +-
app/test/test_rcu_qsbr_perf.c | 114 ++++++-------
app/test/test_ring_perf.c | 11 +-
app/test/test_ring_stress_impl.h | 10 +-
app/test/test_rwlock.c | 9 +-
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +--
app/test/test_spinlock.c | 9 +-
app/test/test_stack_perf.c | 12 +-
app/test/test_threads.c | 33 ++--
app/test/test_ticketlock.c | 9 +-
app/test/test_timer.c | 31 ++--
drivers/baseband/acc/rte_acc100_pmd.c | 36 ++--
drivers/baseband/acc/rte_vrb_pmd.c | 47 ++++--
drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
drivers/bus/vmbus/vmbus_channel.c | 8 +-
drivers/common/cpt/cpt_common.h | 2 +-
drivers/common/iavf/iavf_impl.c | 4 +-
drivers/common/idpf/idpf_common_device.h | 6 +-
drivers/common/idpf/idpf_common_rxtx.c | 14 +-
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 +-
drivers/common/mlx5/linux/mlx5_nl.c | 5 +-
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 +-
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 ++--
drivers/common/mlx5/mlx5_common_utils.h | 6 +-
drivers/common/mlx5/mlx5_malloc.c | 58 +++----
drivers/crypto/ccp/ccp_dev.c | 8 +-
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 +-
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +-
drivers/dma/skeleton/skeleton_dmadev.c | 5 +-
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
drivers/event/dlb2/dlb2.c | 34 ++--
drivers/event/dlb2/dlb2_priv.h | 13 +-
drivers/event/dlb2/dlb2_xstats.c | 2 +-
drivers/event/dsw/dsw_evdev.h | 6 +-
drivers/event/dsw/dsw_event.c | 47 ++++--
drivers/event/dsw/dsw_xstats.c | 4 +-
drivers/event/octeontx/timvf_evdev.h | 8 +-
drivers/event/octeontx/timvf_worker.h | 36 ++--
drivers/event/opdl/opdl_ring.c | 80 ++++-----
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 ++-
drivers/net/bnxt/bnxt_cpr.h | 4 +-
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 +-
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 +-
drivers/net/cpfl/cpfl_ethdev.c | 8 +-
drivers/net/cxgbe/clip_tbl.c | 12 +-
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 +--
drivers/net/cxgbe/cxgbe_ofld.h | 6 +-
drivers/net/cxgbe/l2t.c | 12 +-
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +--
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 +-
drivers/net/cxgbe/smt.h | 2 +-
drivers/net/gve/base/gve_osdep.h | 4 +-
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
drivers/net/hns3/hns3_cmd.c | 18 +-
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 ++--
drivers/net/hns3/hns3_ethdev.h | 32 ++--
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++----
drivers/net/hns3/hns3_intr.c | 36 ++--
drivers/net/hns3/hns3_intr.h | 4 +-
drivers/net/hns3/hns3_mbx.c | 6 +-
drivers/net/hns3/hns3_mp.c | 6 +-
drivers/net/hns3/hns3_rxtx.c | 10 +-
drivers/net/hns3/hns3_tm.c | 4 +-
drivers/net/i40e/i40e_ethdev.c | 4 +-
drivers/net/i40e/i40e_rxtx.c | 6 +-
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf.h | 16 +-
drivers/net/iavf/iavf_rxtx.c | 4 +-
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +-
drivers/net/ice/base/ice_osdep.h | 4 +-
drivers/net/ice/ice_dcf.c | 6 +-
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 +-
drivers/net/ice/ice_dcf_parent.c | 16 +-
drivers/net/ice/ice_ethdev.c | 12 +-
drivers/net/ice/ice_ethdev.h | 2 +-
drivers/net/idpf/idpf_ethdev.c | 7 +-
drivers/net/ixgbe/ixgbe_ethdev.c | 14 +-
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 +-
drivers/net/memif/memif.h | 4 +-
drivers/net/memif/rte_eth_memif.c | 50 +++---
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 +-
drivers/net/mlx5/mlx5.c | 9 +-
drivers/net/mlx5/mlx5.h | 66 ++++----
drivers/net/mlx5/mlx5_flow.c | 37 +++--
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 +++--
drivers/net/mlx5/mlx5_flow_dv.c | 126 +++++++-------
drivers/net/mlx5/mlx5_flow_flex.c | 14 +-
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++----
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++--
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++--
drivers/net/mlx5/mlx5_hws_cnt.c | 71 ++++----
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +-
drivers/net/mlx5/mlx5_rx.h | 14 +-
drivers/net/mlx5/mlx5_rxq.c | 30 ++--
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 +-
drivers/net/mlx5/mlx5_txpp.c | 84 +++++-----
drivers/net/mlx5/mlx5_txq.c | 12 +-
drivers/net/mlx5/mlx5_utils.c | 10 +-
drivers/net/mlx5/mlx5_utils.h | 4 +-
drivers/net/null/rte_eth_null.c | 12 +-
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +-
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +-
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx2_ep_vf.c | 8 +-
drivers/net/octeon_ep/otx_ep_common.h | 4 +-
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 +-
drivers/net/octeontx/octeontx_ethdev.c | 8 +-
drivers/net/qede/base/bcm_osal.c | 6 +-
drivers/net/ring/rte_eth_ring.c | 8 +-
drivers/net/thunderx/nicvf_rxtx.c | 9 +-
drivers/net/thunderx/nicvf_struct.h | 4 +-
drivers/net/txgbe/txgbe_ethdev.c | 12 +-
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
drivers/net/virtio/virtio_ring.h | 4 +-
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 +-
drivers/net/virtio/virtqueue.h | 32 ++--
drivers/raw/ifpga/ifpga_rawdev.c | 9 +-
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +--
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +-
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++---
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 +-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 +-
examples/bbdev_app/main.c | 13 +-
examples/l2fwd-event/l2fwd_common.h | 4 +-
examples/l2fwd-event/l2fwd_event.c | 24 +--
examples/l2fwd-jobstats/main.c | 11 +-
.../client_server_mp/mp_server/main.c | 6 +-
examples/server_node_efd/efd_server/main.c | 6 +-
examples/vhost/main.c | 32 ++--
examples/vhost/main.h | 4 +-
examples/vhost/virtio_net.c | 13 +-
examples/vhost_blk/vhost_blk.c | 8 +-
examples/vm_power_manager/channel_manager.h | 4 +-
examples/vm_power_manager/channel_monitor.c | 9 +-
examples/vm_power_manager/vm_power_cli.c | 3 +-
182 files changed, 1647 insertions(+), 1502 deletions(-)
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
* [PATCH v6 01/45] net/mlx5: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 02/45] net/ixgbe: " Tyler Retzlaff
` (44 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
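A note on the compare-and-swap conversions in this patch (for example the
hca_bar hunk below): __atomic_compare_exchange_n(ptr, &expected, desired,
false, succ, fail) becomes rte_atomic_compare_exchange_strong_explicit();
the builtin's weak argument (false here) maps to the choice of the strong
variant, and the remaining arguments carry over with rte_memory_order_*
constants. A minimal sketch under that assumption, using an illustrative
field rather than the driver's real structures:

#include <stdbool.h>
#include <stddef.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(void *) mapped_bar; /* illustrative stand-in for ppriv->hca_bar */

/* Publish a freshly mapped BAR only if no other thread won the race;
 * returns false if another thread already stored its mapping, leaving
 * the caller to unmap its own copy.
 */
static bool
publish_mapping(void *base)
{
        void *expected = NULL;

        return rte_atomic_compare_exchange_strong_explicit(&mapped_bar,
                        &expected, base,
                        rte_memory_order_relaxed, rte_memory_order_relaxed);
}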
drivers/net/mlx5/linux/mlx5_ethdev_os.c | 6 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 9 ++-
drivers/net/mlx5/mlx5.c | 9 ++-
drivers/net/mlx5/mlx5.h | 66 ++++++++---------
drivers/net/mlx5/mlx5_flow.c | 37 +++++-----
drivers/net/mlx5/mlx5_flow.h | 8 +-
drivers/net/mlx5/mlx5_flow_aso.c | 43 ++++++-----
drivers/net/mlx5/mlx5_flow_dv.c | 126 ++++++++++++++++----------------
drivers/net/mlx5/mlx5_flow_flex.c | 14 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 61 +++++++++-------
drivers/net/mlx5/mlx5_flow_meter.c | 30 ++++----
drivers/net/mlx5/mlx5_flow_quota.c | 32 ++++----
drivers/net/mlx5/mlx5_hws_cnt.c | 71 +++++++++---------
drivers/net/mlx5/mlx5_hws_cnt.h | 10 +--
drivers/net/mlx5/mlx5_rx.h | 14 ++--
drivers/net/mlx5/mlx5_rxq.c | 30 ++++----
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_tx.h | 18 ++---
drivers/net/mlx5/mlx5_txpp.c | 84 ++++++++++-----------
drivers/net/mlx5/mlx5_txq.c | 12 +--
drivers/net/mlx5/mlx5_utils.c | 10 +--
drivers/net/mlx5/mlx5_utils.h | 4 +-
22 files changed, 351 insertions(+), 345 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 40ea9d2..70bba6c 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -1918,9 +1918,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
return -ENOTSUP;
}
/* Check there is no concurrent mapping in other thread. */
- if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
- base, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
+ base,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
return 0;
}
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index b54f3cc..63da8f4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1117,7 +1117,7 @@
return 0;
}
/* Only need to check refcnt, 0 after "sh" is allocated. */
- if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+ if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
priv->lb_used = 1;
return 0;
@@ -1163,7 +1163,7 @@
claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
sh->self_lb.ibv_cq = NULL;
}
- __atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
return -rte_errno;
#else
RTE_SET_USED(dev);
@@ -1186,8 +1186,9 @@
if (!priv->lb_used)
return;
- MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
- if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
+ MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
+ if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
if (sh->self_lb.qp) {
claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
sh->self_lb.qp = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d1a6382..2ff94db 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -855,8 +855,8 @@
ct_pool = mng->pools[idx];
for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
ct = &ct_pool->actions[i];
- val = __atomic_fetch_sub(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
MLX5_ASSERT(val == 1);
if (val > 1)
cnt++;
@@ -1082,7 +1082,8 @@
DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
return -ENOTSUP;
}
- if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
+ if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
+ rte_memory_order_relaxed) + 1 > 1)
return 0;
priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
@@ -1173,7 +1174,7 @@
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
- if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
return;
mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
mlx5_free(fp->flex.devx_fp);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3646d20..9e4a5fe 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -378,7 +378,7 @@ struct mlx5_drop {
struct mlx5_lb_ctx {
struct ibv_qp *qp; /* QP object. */
void *ibv_cq; /* Completion queue. */
- uint16_t refcnt; /* Reference count for representors. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
};
/* HW steering queue job descriptor type. */
@@ -481,10 +481,10 @@ enum mlx5_counter_type {
/* Counter age parameter. */
struct mlx5_age_param {
- uint16_t state; /**< Age state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
uint16_t port_id; /**< Port id of the counter. */
uint32_t timeout:24; /**< Aging timeout in seconds. */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/**< Time in seconds since last hit (atomically accessed). */
void *context; /**< Flow counter age context. */
};
@@ -497,7 +497,7 @@ struct flow_counter_stats {
/* Shared counters information for counters. */
struct mlx5_flow_counter_shared {
union {
- uint32_t refcnt; /* Only for shared action management. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
uint32_t id; /* User counter ID for legacy sharing. */
};
};
@@ -588,7 +588,7 @@ struct mlx5_counter_stats_raw {
/* Counter global management structure. */
struct mlx5_flow_counter_mng {
- volatile uint16_t n_valid; /* Number of valid pools. */
+ volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
uint16_t last_pool_idx; /* Last used pool index */
int min_id; /* The minimum counter ID in the pools. */
int max_id; /* The maximum counter ID in the pools. */
@@ -654,7 +654,7 @@ struct mlx5_aso_sq {
struct mlx5_aso_age_action {
LIST_ENTRY(mlx5_aso_age_action) next;
void *dr_action;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
/* Following fields relevant only when action is active. */
uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
struct mlx5_age_param age_params;
@@ -688,7 +688,7 @@ struct mlx5_geneve_tlv_option_resource {
rte_be16_t option_class; /* geneve tlv opt class.*/
uint8_t option_type; /* geneve tlv opt type.*/
uint8_t length; /* geneve tlv opt length. */
- uint32_t refcnt; /* geneve tlv object reference counter */
+ RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
};
@@ -903,7 +903,7 @@ struct mlx5_flow_meter_policy {
uint16_t group;
/* The group. */
rte_spinlock_t sl;
- uint32_t ref_cnt;
+ RTE_ATOMIC(uint32_t) ref_cnt;
/* Use count. */
struct rte_flow_pattern_template *hws_item_templ;
/* Hardware steering item templates. */
@@ -1038,7 +1038,7 @@ struct mlx5_flow_meter_profile {
struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
/**< srtcm_rfc2697 struct. */
};
- uint32_t ref_cnt; /**< Use count. */
+ RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
uint32_t g_support:1; /**< If G color will be generated. */
uint32_t y_support:1; /**< If Y color will be generated. */
uint32_t initialized:1; /**< Initialized. */
@@ -1078,7 +1078,7 @@ struct mlx5_aso_mtr {
enum mlx5_aso_mtr_type type;
struct mlx5_flow_meter_info fm;
/**< Pointer to the next aso flow meter structure. */
- uint8_t state; /**< ASO flow meter state. */
+ RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
uint32_t offset;
enum rte_color init_color;
};
@@ -1124,7 +1124,7 @@ struct mlx5_flow_mtr_mng {
/* Default policy table. */
uint32_t def_policy_id;
/* Default policy id. */
- uint32_t def_policy_ref_cnt;
+ RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
/** def_policy meter use count. */
struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
/* Meter drop table. */
@@ -1197,8 +1197,8 @@ struct mlx5_txpp_wq {
/* Tx packet pacing internal timestamp. */
struct mlx5_txpp_ts {
- uint64_t ci_ts;
- uint64_t ts;
+ RTE_ATOMIC(uint64_t) ci_ts;
+ RTE_ATOMIC(uint64_t) ts;
};
/* Tx packet pacing structure. */
@@ -1221,12 +1221,12 @@ struct mlx5_dev_txpp {
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
/* Statistics counters. */
- uint64_t err_miss_int; /* Missed service interrupt. */
- uint64_t err_rearm_queue; /* Rearm Queue errors. */
- uint64_t err_clock_queue; /* Clock Queue errors. */
- uint64_t err_ts_past; /* Timestamp in the past. */
- uint64_t err_ts_future; /* Timestamp in the distant future. */
- uint64_t err_ts_order; /* Timestamp not in ascending order. */
+ RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
+ RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
+ RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
+ RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
+ RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
+ RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
};
/* Sample ID information of eCPRI flex parser structure. */
@@ -1287,16 +1287,16 @@ struct mlx5_aso_ct_action {
void *dr_action_orig;
/* General action object for reply dir. */
void *dr_action_rply;
- uint32_t refcnt; /* Action used count in device flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */
uint16_t peer; /* The only peer port index could also use this CT. */
- enum mlx5_aso_ct_state state; /* ASO CT state. */
+ RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
bool is_original; /* The direction of the DR action to be used. */
};
/* CT action object state update. */
#define MLX5_ASO_CT_UPDATE_STATE(c, s) \
- __atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+ rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -1370,7 +1370,7 @@ struct mlx5_flex_pattern_field {
/* Port flex item context. */
struct mlx5_flex_item {
struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
- uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
uint32_t mapnum; /* Number of pattern translation entries. */
struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
@@ -1383,7 +1383,7 @@ struct mlx5_flex_item {
#define MLX5_SRV6_SAMPLE_NUM 5
/* Mlx5 internal flex parser profile structure. */
struct mlx5_internal_flex_parser_profile {
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
struct mlx5_flex_item flex; /* Hold map info for modify field. */
};
@@ -1512,9 +1512,9 @@ struct mlx5_dev_ctx_shared {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
#endif
- struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
- struct mlx5_hlist *modify_cmds;
- struct mlx5_hlist *tag_table;
+ RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
+ RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
+ RTE_ATOMIC(struct mlx5_hlist *) tag_table;
struct mlx5_list *port_id_action_list; /* Port ID action list. */
struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
struct mlx5_list *sample_action_list; /* List of sample actions. */
@@ -1525,7 +1525,7 @@ struct mlx5_dev_ctx_shared {
/* SW steering counters management structure. */
void *default_miss_action; /* Default miss action. */
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
- struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
+ RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
/* Shared interrupt handler section. */
struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
@@ -1570,7 +1570,7 @@ struct mlx5_dev_ctx_shared {
* Caution, secondary process may rebuild the struct during port start.
*/
struct mlx5_proc_priv {
- void *hca_bar;
+ RTE_ATOMIC(void *) hca_bar;
/* Mapped HCA PCI BAR area. */
size_t uar_table_sz;
/* Size of UAR register table. */
@@ -1635,7 +1635,7 @@ struct mlx5_rxq_obj {
/* Indirection table. */
struct mlx5_ind_table_obj {
LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
union {
void *ind_table; /**< Indirection table. */
struct mlx5_devx_obj *rqt; /* DevX RQT object. */
@@ -1826,7 +1826,7 @@ enum mlx5_quota_state {
};
struct mlx5_quota {
- uint8_t state; /* object state */
+ RTE_ATOMIC(uint8_t) state; /* object state */
uint8_t mode; /* metering mode */
/**
* Keep track of application update types.
@@ -1955,7 +1955,7 @@ struct mlx5_priv {
uint32_t flex_item_map; /* Map of allocated flex item elements. */
uint32_t nb_queue; /* HW steering queue number. */
struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
- uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
+ RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -2007,7 +2007,7 @@ struct mlx5_priv {
#endif
struct rte_eth_dev *shared_host; /* Host device for HW steering. */
- uint16_t shared_refcnt; /* HW steering host reference counter. */
+ RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f31fdfb..1954975 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4623,8 +4623,8 @@ struct mlx5_translated_action_handle {
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
- __atomic_fetch_add(&shared_rss->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
+ rte_memory_order_relaxed);
return idx;
default:
break;
@@ -7459,7 +7459,7 @@ struct mlx5_list_entry *
if (tunnel) {
flow->tunnel = 1;
flow->tunnel_id = tunnel->tunnel_id;
- __atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
mlx5_free(default_miss_ctx.queue);
}
mlx5_flow_pop_thread_workspace();
@@ -7470,10 +7470,10 @@ struct mlx5_list_entry *
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
if (rss_desc->shared_rss)
- __atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
+ rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
+ rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
@@ -7976,7 +7976,8 @@ struct rte_flow *
tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
RTE_VERIFY(tunnel);
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
+ rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
flow_mreg_del_copy_action(dev, flow);
@@ -9456,7 +9457,7 @@ struct mlx5_flow_workspace*
{
uint32_t pools_n, us;
- pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
+ pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -9558,17 +9559,17 @@ struct mlx5_flow_workspace*
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
age_param = MLX5_CNT_TO_AGE(cnt);
- if (__atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) != AGE_CANDIDATE)
+ if (rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) != AGE_CANDIDATE)
continue;
if (cur->data[i].hits != prev->data[i].hits) {
- __atomic_store_n(&age_param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
continue;
}
- if (__atomic_fetch_add(&age_param->sec_since_last_hit,
+ if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
time_delta,
- __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
+ rte_memory_order_relaxed) + time_delta <= age_param->timeout)
continue;
/**
* Hold the lock first, or if between the
@@ -9579,10 +9580,10 @@ struct mlx5_flow_workspace*
priv = rte_eth_devices[age_param->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_TMOUT, false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_TMOUT,
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
}
@@ -11407,7 +11408,7 @@ struct tunnel_db_element_release_ctx {
{
struct tunnel_db_element_release_ctx *ctx = x;
ctx->ret = 0;
- if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
mlx5_flow_tunnel_free(dev, tunnel);
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index cc1e8cf..9256aec 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1049,7 +1049,7 @@ struct mlx5_flow_tunnel {
LIST_ENTRY(mlx5_flow_tunnel) chain;
struct rte_flow_tunnel app_tunnel; /** app tunnel copy */
uint32_t tunnel_id; /** unique tunnel ID */
- uint32_t refctn;
+ RTE_ATOMIC(uint32_t) refctn;
struct rte_flow_action action;
struct rte_flow_item item;
struct mlx5_hlist *groups; /** tunnel groups */
@@ -1470,7 +1470,7 @@ struct rte_flow_pattern_template {
struct mlx5dr_match_template *mt; /* mlx5 match template. */
uint64_t item_flags; /* Item layer flags. */
uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
/*
* If true, then rule pattern should be prepended with
* represented_port pattern item.
@@ -1502,7 +1502,7 @@ struct rte_flow_actions_template {
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint16_t recom_off; /* Offset of DR IPv6 routing push remove action. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
uint8_t flex_item; /* flex item index. */
};
@@ -1855,7 +1855,7 @@ struct rte_flow_template_table {
/* Shared RSS action structure */
struct mlx5_shared_action_rss {
ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
- uint32_t refcnt; /**< Atomically accessed refcnt. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
struct rte_flow_action_rss origin; /**< Original rte RSS action. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
struct mlx5_ind_table_obj *ind_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index ab9eb21..a94b228 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -619,7 +619,7 @@
uint8_t *u8addr;
uint8_t hit;
- if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
AGE_CANDIDATE)
continue;
byte = 63 - (j / 8);
@@ -627,13 +627,13 @@
u8addr = (uint8_t *)addr;
hit = (u8addr[byte] >> offset) & 0x1;
if (hit) {
- __atomic_store_n(&ap->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
} else {
struct mlx5_priv *priv;
- __atomic_fetch_add(&ap->sec_since_last_hit,
- diff, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
+ diff, rte_memory_order_relaxed);
/* If timeout passed add to aged-out list. */
if (ap->sec_since_last_hit <= ap->timeout)
continue;
@@ -641,12 +641,11 @@
rte_eth_devices[ap->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- if (__atomic_compare_exchange_n(&ap->state,
+ if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
&expected,
AGE_TMOUT,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
LIST_INSERT_HEAD(&age_info->aged_aso,
act, next);
MLX5_AGE_SET(age_info,
@@ -946,10 +945,10 @@
for (i = 0; i < n; ++i) {
aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
MLX5_ASSERT(aso_mtr);
- verdict = __atomic_compare_exchange_n(&aso_mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
}
sq->tail += n;
@@ -1005,10 +1004,10 @@
mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,
MLX5_INDIRECT_ACTION_IDX_GET(job->action));
MLX5_ASSERT(mtr);
- verdict = __atomic_compare_exchange_n(&mtr->state,
+ verdict = rte_atomic_compare_exchange_strong_explicit(&mtr->state,
&exp_state, ASO_METER_READY,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
MLX5_ASSERT(verdict);
flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
}
@@ -1103,7 +1102,7 @@
struct mlx5_aso_sq *sq;
struct mlx5_dev_ctx_shared *sh = priv->sh;
uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
- uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+ uint8_t state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
poll_cq_t poll_mtr_cq =
is_tmpl_api ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
@@ -1112,7 +1111,7 @@
sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);
do {
poll_mtr_cq(priv, sq);
- if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
ASO_METER_READY)
return 0;
/* Waiting for CQE ready. */
@@ -1411,7 +1410,7 @@
uint16_t wqe_idx;
struct mlx5_aso_ct_pool *pool;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_FREE) {
DRV_LOG(ERR, "Fail: No context to query");
@@ -1620,12 +1619,12 @@
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
else
sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
ASO_CONNTRACK_READY)
return 0;
/* Waiting for CQE ready, consider should block or sleep. */
@@ -1791,7 +1790,7 @@
bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (sh->config.dv_flow_en == 2)
sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
@@ -1807,7 +1806,7 @@
}
do {
mlx5_aso_ct_completion_handle(sh, sq, need_lock);
- state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
if (state == ASO_CONNTRACK_READY ||
state == ASO_CONNTRACK_QUERY)
return 0;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d434c67..f9c56af 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -313,7 +313,7 @@ enum mlx5_l3_tunnel_detection {
}
static inline struct mlx5_hlist *
-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
const char *name, uint32_t size, bool direct_key,
bool lcores_share, void *ctx,
mlx5_list_create_cb cb_create,
@@ -327,7 +327,7 @@ enum mlx5_l3_tunnel_detection {
struct mlx5_hlist *expected = NULL;
char s[MLX5_NAME_SIZE];
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
if (likely(hl))
return hl;
snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
@@ -341,11 +341,11 @@ enum mlx5_l3_tunnel_detection {
"cannot allocate resource memory");
return NULL;
}
- if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_hlist_destroy(hl);
- hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+ hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
}
return hl;
}
@@ -6139,8 +6139,8 @@ struct mlx5_list_entry *
static struct mlx5_indexed_pool *
flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
{
- struct mlx5_indexed_pool *ipool = __atomic_load_n
- (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+ struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
+ (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
if (!ipool) {
struct mlx5_indexed_pool *expected = NULL;
@@ -6165,13 +6165,13 @@ struct mlx5_list_entry *
ipool = mlx5_ipool_create(&cfg);
if (!ipool)
return NULL;
- if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
- &expected, ipool, false,
- __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
+ &expected, ipool,
+ rte_memory_order_seq_cst,
+ rte_memory_order_seq_cst)) {
mlx5_ipool_destroy(ipool);
- ipool = __atomic_load_n(&sh->mdh_ipools[index],
- __ATOMIC_SEQ_CST);
+ ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
+ rte_memory_order_seq_cst);
}
}
return ipool;
@@ -6992,9 +6992,9 @@ struct mlx5_list_entry *
age_info = GET_PORT_AGE_INFO(priv);
age_param = flow_dv_counter_idx_get_age(dev, counter);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since counter may still in process.
@@ -7002,7 +7002,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -7038,8 +7038,8 @@ struct mlx5_list_entry *
* indirect action API, shared info is 1 before the reduction,
* so this condition is failed and function doesn't return here.
*/
- if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
- __ATOMIC_RELAXED) - 1)
+ if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
+ rte_memory_order_relaxed) - 1)
return;
}
cnt->pool = pool;
@@ -10203,8 +10203,8 @@ struct mlx5_list_entry *
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
} else {
ret = rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -10243,8 +10243,8 @@ struct mlx5_list_entry *
geneve_opt_resource->option_class = geneve_opt_v->option_class;
geneve_opt_resource->option_type = geneve_opt_v->option_type;
geneve_opt_resource->length = geneve_opt_v->option_len;
- __atomic_store_n(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed);
}
exit:
rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
@@ -12192,8 +12192,8 @@ struct mlx5_list_entry *
(void *)(uintptr_t)(dev_flow->flow_idx);
age_param->timeout = age->timeout;
age_param->port_id = dev->data->port_id;
- __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
return counter;
}
@@ -13241,9 +13241,9 @@ struct mlx5_list_entry *
uint16_t expected = AGE_CANDIDATE;
age_info = GET_PORT_AGE_INFO(priv);
- if (!__atomic_compare_exchange_n(&age_param->state, &expected,
- AGE_FREE, false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+ AGE_FREE, rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
/**
* We need the lock even it is age timeout,
* since age action may still in process.
@@ -13251,7 +13251,7 @@ struct mlx5_list_entry *
rte_spinlock_lock(&age_info->aged_sl);
LIST_REMOVE(age, next);
rte_spinlock_unlock(&age_info->aged_sl);
- __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
}
}
@@ -13275,7 +13275,7 @@ struct mlx5_list_entry *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
- uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
flow_dv_aso_age_remove_from_age(dev, age);
@@ -13451,7 +13451,7 @@ struct mlx5_list_entry *
return 0; /* 0 is an error. */
}
}
- __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
return pool->index | ((age_free->offset + 1) << 16);
}
@@ -13481,10 +13481,10 @@ struct mlx5_list_entry *
aso_age->age_params.context = context;
aso_age->age_params.timeout = timeout;
aso_age->age_params.port_id = dev->data->port_id;
- __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
- __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
static void
@@ -13666,12 +13666,12 @@ struct mlx5_list_entry *
uint32_t ret;
struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
enum mlx5_aso_ct_state state =
- __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
/* Cannot release when CT is in the ASO SQ. */
if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
return -1;
- ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret) {
if (ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -13861,7 +13861,7 @@ struct mlx5_list_entry *
pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
/* 0: inactive, 1: created, 2+: used by flows. */
- __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
if (!ct->dr_action_orig) {
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -14813,8 +14813,8 @@ struct mlx5_list_entry *
age_act = flow_aso_age_get_by_idx(dev, owner_idx);
if (flow->age == 0) {
flow->age = owner_idx;
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
+ rte_memory_order_relaxed);
}
age_act_pos = actions_n++;
action_flags |= MLX5_FLOW_ACTION_AGE;
@@ -14851,9 +14851,9 @@ struct mlx5_list_entry *
} else {
if (flow->counter == 0) {
flow->counter = owner_idx;
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&cnt_act->shared_info.refcnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
}
/* Save information first, will apply later. */
action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -15185,8 +15185,8 @@ struct mlx5_list_entry *
flow->indirect_type =
MLX5_INDIRECT_ACTION_TYPE_CT;
flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
+ rte_memory_order_relaxed);
}
actions_n++;
action_flags |= MLX5_FLOW_ACTION_CT;
@@ -15855,7 +15855,7 @@ struct mlx5_list_entry *
shared_rss = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
- __atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
}
void
@@ -16038,8 +16038,8 @@ struct mlx5_list_entry *
sh->geneve_tlv_option_resource;
rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
if (geneve_opt_resource) {
- if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
- __ATOMIC_RELAXED) - 1)) {
+ if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
+ rte_memory_order_relaxed) - 1)) {
claim_zero(mlx5_devx_cmd_destroy
(geneve_opt_resource->obj));
mlx5_free(sh->geneve_tlv_option_resource);
@@ -16448,7 +16448,7 @@ struct mlx5_list_entry *
/* Update queue with indirect table queue memoyr. */
origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
- __atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
@@ -16494,9 +16494,9 @@ struct mlx5_list_entry *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"invalid shared action");
- if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
- 0, 0, __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
+ 0, rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -16632,10 +16632,10 @@ struct rte_flow_action_handle *
return __flow_dv_action_rss_release(dev, idx, error);
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
- if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
- &no_flow_refcnt, 1, false,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED))
+ if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
+ &no_flow_refcnt, 1,
+ rte_memory_order_acquire,
+ rte_memory_order_relaxed))
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
@@ -17595,13 +17595,13 @@ struct rte_flow_action_handle *
case MLX5_INDIRECT_ACTION_TYPE_AGE:
age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
resp = data;
- resp->aged = __atomic_load_n(&age_param->state,
- __ATOMIC_RELAXED) == AGE_TMOUT ?
+ resp->aged = rte_atomic_load_explicit(&age_param->state,
+ rte_memory_order_relaxed) == AGE_TMOUT ?
1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_COUNT:
return flow_dv_query_count(dev, idx, data, error);
@@ -17678,12 +17678,12 @@ struct rte_flow_action_handle *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
}
- resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+ resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
AGE_TMOUT ? 1 : 0;
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 4ae03a2..8a02247 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -86,7 +86,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map |= 1u << idx;
}
}
@@ -107,7 +107,7 @@
MLX5_ASSERT(!item->refcnt);
MLX5_ASSERT(!item->devx_fp);
item->devx_fp = NULL;
- __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
priv->flex_item_map &= ~(1u << idx);
rte_spinlock_unlock(&priv->flex_item_sl);
}
@@ -379,7 +379,7 @@
return ret;
}
if (acquire)
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return ret;
}
@@ -414,7 +414,7 @@
rte_errno = -EINVAL;
return -EINVAL;
}
- __atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
return 0;
}
@@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *
}
flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
/* Mark initialized flex item valid. */
- __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
return (struct rte_flow_item_flex_handle *)flex;
error:
@@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
"invalid flex item handle value");
}
- if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
+ rte_memory_order_acquire, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&priv->flex_item_sl);
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ITEM, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9ebbe66..8891f3c 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -715,7 +715,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
}
if (acts->mark)
- if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
+ if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed) - 1))
flow_hw_rxq_flag_set(dev, false);
if (acts->jump) {
@@ -2298,7 +2299,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
goto err;
acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
- __atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
+ rte_memory_order_relaxed);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
@@ -4537,8 +4539,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
uint8_t i;
for (i = 0; i < nb_action_templates; i++) {
- uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED);
+ uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (refcnt <= 1) {
rte_flow_error_set(error, EINVAL,
@@ -4576,8 +4578,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
while (i--) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_sub_fetch(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
return rte_errno;
}
@@ -4748,8 +4750,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
- ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
- __ATOMIC_RELAXED) + 1;
+ ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
+ rte_memory_order_relaxed) + 1;
if (ret <= 1) {
rte_errno = EINVAL;
goto it_error;
@@ -4800,14 +4802,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
at_error:
for (i = 0; i < nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
- __atomic_fetch_sub(&action_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
}
i = nb_item_templates;
it_error:
while (i--)
- __atomic_fetch_sub(&item_templates[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
+ 1, rte_memory_order_relaxed);
error:
err = rte_errno;
if (tbl) {
@@ -5039,12 +5041,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
}
LIST_REMOVE(table, next);
for (i = 0; i < table->nb_item_templates; i++)
- __atomic_fetch_sub(&table->its[i]->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
+ 1, rte_memory_order_relaxed);
for (i = 0; i < table->nb_action_templates; i++) {
__flow_hw_action_template_destroy(dev, &table->ats[i].acts);
- __atomic_fetch_sub(&table->ats[i].action_template->refcnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
+ 1, rte_memory_order_relaxed);
}
flow_hw_destroy_table_multi_pattern_ctx(table);
if (table->matcher_info[0].matcher)
@@ -7287,7 +7289,7 @@ enum mlx5_hw_indirect_list_relative_position {
if (!at->tmpl)
goto error;
at->action_flags = action_flags;
- __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
return at;
error:
@@ -7323,7 +7325,7 @@ enum mlx5_hw_indirect_list_relative_position {
uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Action template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -7897,7 +7899,7 @@ enum mlx5_hw_indirect_list_relative_position {
break;
}
}
- __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
rte_errno = pattern_template_validate(dev, &it, 1);
if (rte_errno)
goto error;
@@ -7933,7 +7935,7 @@ enum mlx5_hw_indirect_list_relative_position {
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
DRV_LOG(WARNING, "Item template %p is still in use.",
(void *)template);
return rte_flow_error_set(error, EBUSY,
@@ -10513,7 +10515,8 @@ struct mlx5_list_entry *
}
dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
priv->shared_host = host_dev;
- __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
}
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
@@ -10698,7 +10701,8 @@ struct mlx5_list_entry *
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
if (priv->hw_q) {
@@ -10814,7 +10818,8 @@ struct mlx5_list_entry *
priv->hw_q = NULL;
if (priv->shared_host) {
struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+ rte_memory_order_relaxed);
priv->shared_host = NULL;
}
mlx5_free(priv->hw_attr);
@@ -10872,8 +10877,8 @@ struct mlx5_list_entry *
NULL,
"Invalid CT destruction index");
}
- __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
+ rte_memory_order_relaxed);
mlx5_ipool_free(pool->cts, idx);
return 0;
}
@@ -11572,7 +11577,7 @@ struct mlx5_hw_q_job *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "age data not available");
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_REPORTED:
case HWS_AGE_AGED_OUT_NOT_REPORTED:
resp->aged = 1;
@@ -11592,8 +11597,8 @@ struct mlx5_hw_q_job *
}
resp->sec_since_last_hit_valid = !resp->aged;
if (resp->sec_since_last_hit_valid)
- resp->sec_since_last_hit = __atomic_load_n
- (&param->sec_since_last_hit, __ATOMIC_RELAXED);
+ resp->sec_since_last_hit = rte_atomic_load_explicit
+ (&param->sec_since_last_hit, rte_memory_order_relaxed);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index ca361f7..da3289b 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -2055,9 +2055,9 @@ struct mlx5_flow_meter_policy *
NULL, "Meter profile id not valid.");
/* Meter policy must exist. */
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
- __atomic_fetch_add
+ rte_atomic_fetch_add_explicit
(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ 1, rte_memory_order_relaxed);
domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
if (!priv->sh->config.dv_esw_en)
domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -2137,7 +2137,7 @@ struct mlx5_flow_meter_policy *
fm->is_enable = params->meter_enable;
fm->shared = !!shared;
fm->color_aware = !!params->use_prev_mtr_color;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
fm->def_policy = 1;
fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
@@ -2166,7 +2166,7 @@ struct mlx5_flow_meter_policy *
}
fm->active_state = params->meter_enable;
if (mtr_policy)
- __atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
error:
mlx5_flow_destroy_mtr_tbls(dev, fm);
@@ -2271,8 +2271,8 @@ struct mlx5_flow_meter_policy *
NULL, "Failed to create devx meter.");
}
fm->active_state = params->meter_enable;
- __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
- __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
return 0;
}
#endif
@@ -2295,7 +2295,7 @@ struct mlx5_flow_meter_policy *
if (fmp == NULL)
return -1;
/* Update dependencies. */
- __atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
fm->profile = NULL;
/* Remove from list. */
if (!priv->sh->meter_aso_en) {
@@ -2313,15 +2313,15 @@ struct mlx5_flow_meter_policy *
}
mlx5_flow_destroy_mtr_tbls(dev, fm);
if (fm->def_policy)
- __atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
+ 1, rte_memory_order_relaxed);
if (priv->sh->meter_aso_en) {
if (!fm->def_policy) {
mtr_policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
if (mtr_policy)
- __atomic_fetch_sub(&mtr_policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
fm->policy_id = 0;
}
fm->def_policy = 0;
@@ -2424,13 +2424,13 @@ struct mlx5_flow_meter_policy *
RTE_MTR_ERROR_TYPE_UNSPECIFIED,
NULL, "Meter object is being used.");
/* Destroy the meter profile. */
- __atomic_fetch_sub(&fm->profile->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
+ 1, rte_memory_order_relaxed);
/* Destroy the meter policy. */
policy = mlx5_flow_meter_policy_find(dev,
fm->policy_id, NULL);
- __atomic_fetch_sub(&policy->ref_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
+ 1, rte_memory_order_relaxed);
memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
index 14a2a8b..6ad0e8a 100644
--- a/drivers/net/mlx5/mlx5_flow_quota.c
+++ b/drivers/net/mlx5/mlx5_flow_quota.c
@@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
struct mlx5_quota *quota_obj =
sq->elts[(sq->tail + i) & mask].quota_obj;
- __atomic_compare_exchange_n(&quota_obj->state, &state,
- MLX5_QUOTA_STATE_READY, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
+ MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
}
}
@@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
rte_spinlock_lock(&sq->sqsl);
mlx5_quota_cmd_completion_handle(sq);
rte_spinlock_unlock(&sq->sqsl);
- if (__atomic_load_n(&quota_obj->state, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
MLX5_QUOTA_STATE_READY)
return 0;
} while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
{
uint8_t state = MLX5_QUOTA_STATE_READY;
- bool verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ bool verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict)
return rte_flow_error_set(error, EBUSY,
@@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
async_job ? async_job : &sync_job, push, NULL);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
async_job ? async_job : &sync_job, push,
(void *)(uintptr_t)update->conf);
if (ret) {
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+ rte_memory_order_relaxed);
return rte_flow_error_set(error, EAGAIN,
RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
}
@@ -593,9 +593,9 @@ struct rte_flow_action_handle *
NULL, "quota: failed to allocate quota object");
return NULL;
}
- verdict = __atomic_compare_exchange_n
- (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ verdict = rte_atomic_compare_exchange_strong_explicit
+ (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (!verdict) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: new quota object has invalid state");
@@ -616,8 +616,8 @@ struct rte_flow_action_handle *
(void *)(uintptr_t)conf);
if (ret) {
mlx5_ipool_free(qctx->quota_ipool, id);
- __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
+ rte_memory_order_relaxed);
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "quota: WR failure");
return 0;
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index c31f2f3..1b625e0 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -149,7 +149,7 @@
}
if (param->timeout == 0)
continue;
- switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+ switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
case HWS_AGE_AGED_OUT_NOT_REPORTED:
case HWS_AGE_AGED_OUT_REPORTED:
/* Already aged-out, no action is needed. */
@@ -171,8 +171,8 @@
hits = rte_be_to_cpu_64(stats[i].hits);
if (param->nb_cnts == 1) {
if (hits != param->accumulator_last_hits) {
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
param->accumulator_last_hits = hits;
continue;
}
@@ -184,8 +184,8 @@
param->accumulator_cnt = 0;
if (param->accumulator_last_hits !=
param->accumulator_hits) {
- __atomic_store_n(&param->sec_since_last_hit,
- 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit,
+ 0, rte_memory_order_relaxed);
param->accumulator_last_hits =
param->accumulator_hits;
param->accumulator_hits = 0;
@@ -193,9 +193,9 @@
}
param->accumulator_hits = 0;
}
- if (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,
- __ATOMIC_RELAXED) + time_delta <=
- __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
+ if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
+ rte_memory_order_relaxed) + time_delta <=
+ rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
continue;
/* Prepare the relevant ring for this AGE parameter */
if (priv->hws_strict_queue)
@@ -203,10 +203,10 @@
else
r = age_info->hw_age.aged_list;
/* Changing the state atomically and insert it into the ring. */
- if (__atomic_compare_exchange_n(&param->state, &expected1,
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED)) {
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed)) {
int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
sizeof(uint32_t),
1, NULL);
@@ -221,11 +221,10 @@
*/
expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
if (ret == 0 &&
- !__atomic_compare_exchange_n(&param->state,
+ !rte_atomic_compare_exchange_strong_explicit(&param->state,
&expected2, expected1,
- false,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected2 == HWS_AGE_FREE)
mlx5_hws_age_param_free(priv,
param->own_cnt_index,
@@ -235,10 +234,10 @@
if (!priv->hws_strict_queue)
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
} else {
- __atomic_compare_exchange_n(&param->state, &expected2,
+ rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
HWS_AGE_AGED_OUT_NOT_REPORTED,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed);
}
}
/* The event is irrelevant in strict queue mode. */
@@ -796,8 +795,8 @@ struct mlx5_hws_cnt_pool *
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
- switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
- __ATOMIC_RELAXED)) {
+ switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
+ rte_memory_order_relaxed)) {
case HWS_AGE_CANDIDATE:
case HWS_AGE_AGED_OUT_REPORTED:
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
@@ -862,8 +861,8 @@ struct mlx5_hws_cnt_pool *
"cannot allocate AGE parameter");
return 0;
}
- MLX5_ASSERT(__atomic_load_n(&param->state,
- __ATOMIC_RELAXED) == HWS_AGE_FREE);
+ MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
+ rte_memory_order_relaxed) == HWS_AGE_FREE);
if (shared) {
param->nb_cnts = 0;
param->accumulator_hits = 0;
@@ -914,9 +913,9 @@ struct mlx5_hws_cnt_pool *
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"invalid AGE parameter index");
if (update_ade->timeout_valid) {
- uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
+ uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
update_ade->timeout,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (old_timeout == 0)
sec_since_last_hit_reset = true;
@@ -935,8 +934,8 @@ struct mlx5_hws_cnt_pool *
state_update = true;
}
if (sec_since_last_hit_reset)
- __atomic_store_n(&param->sec_since_last_hit, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+ rte_memory_order_relaxed);
if (state_update) {
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
@@ -945,13 +944,13 @@ struct mlx5_hws_cnt_pool *
* - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
* - AGED_OUT_REPORTED -> CANDIDATE
*/
- if (!__atomic_compare_exchange_n(&param->state, &expected,
+ if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
HWS_AGE_CANDIDATE_INSIDE_RING,
- false, __ATOMIC_RELAXED,
- __ATOMIC_RELAXED) &&
+ rte_memory_order_relaxed,
+ rte_memory_order_relaxed) &&
expected == HWS_AGE_AGED_OUT_REPORTED)
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
}
return 0;
}
@@ -976,9 +975,9 @@ struct mlx5_hws_cnt_pool *
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
MLX5_ASSERT(param != NULL);
- if (__atomic_compare_exchange_n(&param->state, &expected,
- HWS_AGE_AGED_OUT_REPORTED, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
+ HWS_AGE_AGED_OUT_REPORTED,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
return param->context;
switch (expected) {
case HWS_AGE_FREE:
@@ -990,8 +989,8 @@ struct mlx5_hws_cnt_pool *
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
break;
case HWS_AGE_CANDIDATE_INSIDE_RING:
- __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+ rte_memory_order_relaxed);
break;
case HWS_AGE_CANDIDATE:
/*
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index 1cb0564..db4e99e 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -101,7 +101,7 @@ struct __rte_cache_aligned mlx5_hws_cnt_pool {
LIST_ENTRY(mlx5_hws_cnt_pool) next;
alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_pool_cfg cfg;
alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_dcs_mng dcs_mng;
- alignas(RTE_CACHE_LINE_SIZE) uint32_t query_gen;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) query_gen;
struct mlx5_hws_cnt *pool;
struct mlx5_hws_cnt_raw_data_mng *raw_mng;
struct rte_ring *reuse_list;
@@ -134,10 +134,10 @@ enum {
/* HWS counter age parameter. */
struct __rte_cache_aligned mlx5_hws_age_param {
- uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
- uint32_t sec_since_last_hit;
+ RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
+ RTE_ATOMIC(uint32_t) sec_since_last_hit;
/* Time in seconds since last hit (atomically accessed). */
- uint16_t state; /* AGE state (atomically accessed). */
+ RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
uint64_t accumulator_last_hits;
/* Last total value of hits for comparing. */
uint64_t accumulator_hits;
@@ -426,7 +426,7 @@ struct __rte_cache_aligned mlx5_hws_age_param {
iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
hpool->pool[iidx].in_used = false;
hpool->pool[iidx].query_gen_when_free =
- __atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
qcache = hpool->cache->qcache[*queue];
if (unlikely(qcache == NULL)) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index fb4d8e6..d008e4d 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {
/* RX queue private data. */
struct mlx5_rxq_priv {
uint16_t idx; /* Queue index. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -188,7 +188,7 @@ struct mlx5_rxq_priv {
/* External RX queue descriptor. */
struct mlx5_external_rxq {
uint32_t hw_id; /* Queue index in the Hardware. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
};
/* mlx5_rxq.c */
@@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
void *addr;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
MLX5_ASSERT(rep != NULL);
/* Replace MPRQ buf. */
(*rxq->mprq_bufs)[rq_idx] = rep;
@@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
void *buf_addr;
/* Increment the refcnt of the whole chunk. */
- __atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
- MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
- __ATOMIC_RELAXED) <= strd_n + 1);
+ rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
+ MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
+ rte_memory_order_relaxed) <= strd_n + 1);
buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
/*
* MLX5 device doesn't use iova but it is necessary in a
@@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
return false;
rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
- return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+ return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
}
#define LWM_COOKIE_RXQID_OFFSET 0
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dd51687..f67aaa6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -416,7 +416,7 @@
rte_errno = EINVAL;
return -rte_errno;
}
- return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1319,7 +1319,7 @@
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
for (j = 0; j != strd_n; ++j) {
shinfo = &buf->shinfos[j];
shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2037,7 +2037,7 @@ struct mlx5_rxq_priv *
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
if (rxq != NULL)
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2059,7 +2059,7 @@ struct mlx5_rxq_priv *
if (rxq == NULL)
return 0;
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2138,7 +2138,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
return rxq;
}
@@ -2158,7 +2158,7 @@ struct mlx5_external_rxq *
{
struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
- return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
}
/**
@@ -2447,8 +2447,8 @@ struct mlx5_ind_table_obj *
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
== 0)) {
- __atomic_fetch_add(&ind_tbl->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
+ rte_memory_order_relaxed);
break;
}
}
@@ -2479,7 +2479,7 @@ struct mlx5_ind_table_obj *
unsigned int ret;
rte_rwlock_write_lock(&priv->ind_tbls_lock);
- ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
@@ -2561,7 +2561,7 @@ struct mlx5_ind_table_obj *
}
return ret;
}
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
return 0;
}
@@ -2626,7 +2626,7 @@ struct mlx5_ind_table_obj *
{
uint32_t refcnt;
- refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+ refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
if (refcnt <= 1)
return 0;
/*
@@ -3258,8 +3258,8 @@ struct mlx5_hrxq *
ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
if (ext_rxq == NULL)
return -rte_errno;
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
if (ext_rxq->hw_id != hw_idx) {
DRV_LOG(ERR, "Port %u external RxQ index %u "
"is already mapped to HW index (requesting is "
@@ -3296,8 +3296,8 @@ struct mlx5_hrxq *
rte_errno = EINVAL;
return -rte_errno;
}
- if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
port_id, dpdk_idx);
rte_errno = EINVAL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f8d6728..c241a1d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1441,7 +1441,7 @@
rte_delay_us_sleep(1000 * priv->rxqs_n);
DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
if (priv->sh->config.dv_flow_en == 2) {
- if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
flow_hw_rxq_flag_set(dev, false);
} else {
mlx5_flow_stop_default(dev);
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 107d7ab..0d77ff8 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -179,7 +179,7 @@ struct __rte_cache_aligned mlx5_txq_data {
__extension__
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
+ RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
unsigned int socket; /* CPU socket ID for allocations. */
bool is_hairpin; /* Whether TxQ type is Hairpin. */
unsigned int max_inline_data; /* Max inline data. */
@@ -339,8 +339,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
* the service thread, data should be re-read.
*/
rte_compiler_barrier();
- ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
- ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
+ ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
+ ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
break;
@@ -350,8 +350,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts -= ts;
if (unlikely(mts >= UINT64_MAX / 2)) {
/* We have negative integer, mts is in the past. */
- __atomic_fetch_add(&sh->txpp.err_ts_past,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
+ 1, rte_memory_order_relaxed);
return -1;
}
tick = sh->txpp.tick;
@@ -360,8 +360,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
mts = (mts + tick - 1) / tick;
if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
/* We have mts is too distant future. */
- __atomic_fetch_add(&sh->txpp.err_ts_future,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
+ 1, rte_memory_order_relaxed);
return -1;
}
mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
@@ -1743,8 +1743,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
/* Convert the timestamp into completion to wait. */
ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
if (txq->ts_last && ts < txq->ts_last)
- __atomic_fetch_add(&txq->sh->txpp.err_ts_order,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
+ 1, rte_memory_order_relaxed);
txq->ts_last = ts;
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
sh = txq->sh;
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 5a5df2d..4e26fa2 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -538,12 +538,12 @@
uint64_t *ps;
rte_compiler_barrier();
- tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
- op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
+ tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
+ op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
rte_compiler_barrier();
- if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
+ if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
continue;
- if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
+ if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
continue;
ps = (uint64_t *)ts;
ps[0] = tm;
@@ -561,8 +561,8 @@
ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
rte_compiler_barrier();
- __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
rte_wmb();
}
@@ -590,8 +590,8 @@
*/
DRV_LOG(DEBUG,
"Clock Queue error sync lost (%X).", opcode);
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
return;
@@ -633,10 +633,10 @@
if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
return;
MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
- sh->txpp.ts.ts, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
- sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+ sh->txpp.ts.ts, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+ sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
sh->txpp.ts_p = 0;
if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -677,8 +677,8 @@
/* Check whether we have missed interrupts. */
if (cq_ci - wq->cq_ci != 1) {
DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
- __atomic_fetch_add(&sh->txpp.err_miss_int,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
+ 1, rte_memory_order_relaxed);
/* Check sync lost on wqe index. */
if (cq_ci - wq->cq_ci >=
(((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -693,8 +693,8 @@
/* Fire new requests to Rearm Queue. */
if (error) {
DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_rearm_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
}
}
@@ -987,8 +987,8 @@
mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
if (to.cts.op_own >> 4) {
DRV_LOG(DEBUG, "Clock Queue error sync lost.");
- __atomic_fetch_add(&sh->txpp.err_clock_queue,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+ 1, rte_memory_order_relaxed);
sh->txpp.sync_lost = 1;
return -EIO;
}
@@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
return 0;
}
@@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
do {
uint64_t ts, ci;
- ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
- ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+ ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
+ ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
rte_compiler_barrier();
if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ts,
- __ATOMIC_RELAXED) != ts)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
+ rte_memory_order_relaxed) != ts)
continue;
- if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
- __ATOMIC_RELAXED) != ci)
+ if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
+ rte_memory_order_relaxed) != ci)
continue;
tsa->ts = ts;
tsa->ci_ts = ci;
@@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
for (i = 0; i < n_txpp; ++i)
stats[n_used + i].id = n_used + i;
stats[n_used + 0].value =
- __atomic_load_n(&sh->txpp.err_miss_int,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_miss_int,
+ rte_memory_order_relaxed);
stats[n_used + 1].value =
- __atomic_load_n(&sh->txpp.err_rearm_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
+ rte_memory_order_relaxed);
stats[n_used + 2].value =
- __atomic_load_n(&sh->txpp.err_clock_queue,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
+ rte_memory_order_relaxed);
stats[n_used + 3].value =
- __atomic_load_n(&sh->txpp.err_ts_past,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_past,
+ rte_memory_order_relaxed);
stats[n_used + 4].value =
- __atomic_load_n(&sh->txpp.err_ts_future,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_future,
+ rte_memory_order_relaxed);
stats[n_used + 5].value =
- __atomic_load_n(&sh->txpp.err_ts_order,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&sh->txpp.err_ts_order,
+ rte_memory_order_relaxed);
stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
stats[n_used + 8].value = sh->txpp.sync_lost;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 14f55e8..da4236f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *
rte_errno = ENOMEM;
goto error;
}
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
tmpl->is_hairpin = false;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
@@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->is_hairpin = true;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
- __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
}
return ctrl;
}
@@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *
if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
return 0;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
+ if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
return 1;
if (txq_ctrl->obj) {
priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *
txq_free_elts(txq_ctrl);
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
- if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
if (!txq_ctrl->is_hairpin)
mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq_ctrl, next);
@@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *
if (!(*priv->txqs)[idx])
return -1;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
+ return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
}
/**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index e28db2e..fc03cc0 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -203,7 +203,7 @@ struct mlx5_indexed_pool *
struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
lc = pool->cache[cidx]->lc;
- gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
+ gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
if (gc && lc != gc) {
mlx5_ipool_lock(pool);
if (lc && !(--lc->ref_cnt))
@@ -266,8 +266,8 @@ struct mlx5_indexed_pool *
pool->cache[cidx]->len = fetch_size - 1;
return pool->cache[cidx]->idx[pool->cache[cidx]->len];
}
- trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
- __ATOMIC_ACQUIRE) : 0;
+ trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
+ rte_memory_order_acquire) : 0;
trunk_n = lc ? lc->n_trunk : 0;
cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
/* Check if index reach maximum. */
@@ -332,11 +332,11 @@ struct mlx5_indexed_pool *
lc = p;
lc->ref_cnt = 1;
pool->cache[cidx]->lc = lc;
- __atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
}
/* Add trunk to trunks array. */
lc->trunks[trunk_idx] = trunk;
- __atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
/* Enqueue half of the index to global. */
ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
fetch_size = trunk->free >> 1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index b51d977..d86a809 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {
struct mlx5_indexed_cache {
struct mlx5_indexed_trunk **trunks;
- volatile uint32_t n_trunk_valid; /* Trunks allocated. */
+ volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
uint32_t n_trunk; /* Trunk pointer array size. */
uint32_t ref_cnt;
uint32_t len;
@@ -266,7 +266,7 @@ struct mlx5_indexed_pool {
uint32_t free_list; /* Index to first free trunk. */
};
struct {
- struct mlx5_indexed_cache *gc;
+ RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
/* Global cache. */
struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
/* Local cache. */
--
1.8.3.1
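A minimal sketch (not the driver code itself) of the torn-read guard used
around the Clock Queue timestamp words in the mlx5_txpp.c hunks above, written
with the converted API: both 64-bit halves are loaded with relaxed atomics
between compiler barriers and then re-checked, retrying until a consistent
pair is observed. Struct and function names here are illustrative and assume
the DPDK headers are available.

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

struct ts_pair {
	RTE_ATOMIC(uint64_t) ts;
	RTE_ATOMIC(uint64_t) ci_ts;
};

static inline void
ts_pair_snapshot(struct ts_pair *p, uint64_t *ts, uint64_t *ci)
{
	uint64_t a, b;

	do {
		rte_compiler_barrier();
		a = rte_atomic_load_explicit(&p->ts, rte_memory_order_relaxed);
		b = rte_atomic_load_explicit(&p->ci_ts, rte_memory_order_relaxed);
		rte_compiler_barrier();
		/* Retry if either word changed while being read. */
	} while (a != rte_atomic_load_explicit(&p->ts, rte_memory_order_relaxed) ||
		 b != rte_atomic_load_explicit(&p->ci_ts, rte_memory_order_relaxed));
	*ts = a;
	*ci = b;
}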
* [PATCH v6 02/45] net/ixgbe: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 01/45] net/mlx5: use rte " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 03/45] net/iavf: " Tyler Retzlaff
` (43 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
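A minimal usage sketch (not the driver code itself) of the one change in this
patch that is more than a mechanical rename: __atomic_test_and_set() on the
link-thread flag becomes an exchange that stores 1 and returns the previous
value, so only the caller that flips 0 -> 1 wins. Names are illustrative and
assume the DPDK headers are available.

#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(bool) link_thread_running;

/* Returns true only for the single caller that claims the flag. */
static inline bool
link_thread_try_claim(void)
{
	return !rte_atomic_exchange_explicit(&link_thread_running, 1,
					     rte_memory_order_seq_cst);
}

static inline void
link_thread_release(void)
{
	rte_atomic_store_explicit(&link_thread_running, 0,
				  rte_memory_order_seq_cst);
}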
drivers/net/ixgbe/ixgbe_ethdev.c | 14 ++++++++------
drivers/net/ixgbe/ixgbe_ethdev.h | 2 +-
drivers/net/ixgbe/ixgbe_rxtx.c | 4 ++--
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c61c52b..e63ae1a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1130,7 +1130,7 @@ struct rte_ixgbe_xstats_name_off {
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbe_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1638,7 +1638,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
ixgbevf_parse_devargs(eth_dev->data->dev_private,
pci_dev->device.devargs);
@@ -4203,7 +4203,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
/* NOTE: review for potential ordering optimization */
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -4240,7 +4240,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -4336,7 +4336,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
ixgbe_dev_wait_setup_link_complete(dev, 0);
/* NOTE: review for potential ordering optimization */
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the IXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -4348,7 +4349,8 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR,
"Create link thread failed!");
/* NOTE: review for potential ordering optimization */
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 22fc3be..8ad841e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -511,7 +511,7 @@ struct ixgbe_adapter {
*/
uint8_t pflink_fullchk;
uint8_t mac_ctrl_frame_fwd;
- bool link_thread_running;
+ RTE_ATOMIC(bool) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 3d39eaa..0d42fd8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1831,7 +1831,7 @@ const alignas(RTE_CACHE_LINE_SIZE) uint32_t
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
@@ -2114,7 +2114,7 @@ const alignas(RTE_CACHE_LINE_SIZE) uint32_t
* Use acquire fence to ensure that status_error which includes
* DD bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
--
1.8.3.1
* [PATCH v6 03/45] net/iavf: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 01/45] net/mlx5: use rte " Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 02/45] net/ixgbe: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 04/45] net/ice: " Tyler Retzlaff
` (42 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
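A minimal sketch (not the driver code itself) of the pending-command claim
this patch converts: note the API difference, the gcc __atomic_compare_exchange
builtin took the desired value by pointer, while
rte_atomic_compare_exchange_strong_explicit takes it by value. Names are
illustrative and assume the DPDK headers are available.

#include <stdint.h>
#include <rte_stdatomic.h>

static volatile RTE_ATOMIC(uint32_t) pend_cmd; /* 0 == no command pending */

/* Returns nonzero if the slot was free and is now claimed for 'ops'. */
static inline int
cmd_slot_claim(uint32_t ops)
{
	uint32_t expected = 0;

	return rte_atomic_compare_exchange_strong_explicit(&pend_cmd,
			&expected, ops,
			rte_memory_order_acquire, rte_memory_order_acquire);
}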
drivers/net/iavf/iavf.h | 16 ++++++++--------
drivers/net/iavf/iavf_rxtx.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_neon.c | 2 +-
drivers/net/iavf/iavf_vchnl.c | 14 +++++++-------
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 7ab41c9..ad526c6 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -238,8 +238,8 @@ struct iavf_info {
struct virtchnl_vlan_caps vlan_v2_caps;
uint64_t supported_rxdid;
uint8_t *proto_xtr; /* proto xtr type for all queues */
- volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
- uint32_t pend_cmd_count;
+ volatile RTE_ATOMIC(enum virtchnl_ops) pend_cmd; /* pending command not finished */
+ RTE_ATOMIC(uint32_t) pend_cmd_count;
int cmd_retval; /* return value of the cmd response from PF */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
@@ -456,13 +456,13 @@ struct iavf_cmd_info {
_atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 1, rte_memory_order_relaxed);
return !ret;
}
@@ -472,13 +472,13 @@ struct iavf_cmd_info {
_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
- int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ int ret = rte_atomic_compare_exchange_strong_explicit(&vf->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
- __atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->pend_cmd_count, 2, rte_memory_order_relaxed);
return !ret;
}
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 59a0b9e..ecc3143 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2025,7 +2025,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
@@ -2152,7 +2152,7 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many contiguous DD bits were set */
for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
index 83825aa..20b656e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_neon.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -273,7 +273,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 1111d30..6d5969f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -41,7 +41,7 @@ struct iavf_event_element {
};
struct iavf_event_handler {
- uint32_t ndev;
+ RTE_ATOMIC(uint32_t) ndev;
rte_thread_t tid;
int fd[2];
pthread_mutex_t lock;
@@ -129,7 +129,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_add(&handler->ndev, 1, __ATOMIC_RELAXED) + 1 != 1)
+ if (rte_atomic_fetch_add_explicit(&handler->ndev, 1, rte_memory_order_relaxed) + 1 != 1)
return 0;
#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
@@ -137,7 +137,7 @@ struct iavf_event_handler {
int err = pipe(handler->fd);
#endif
if (err != 0) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -146,7 +146,7 @@ struct iavf_event_handler {
if (rte_thread_create_internal_control(&handler->tid, "iavf-event",
iavf_dev_event_handle, NULL)) {
- __atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed);
return -1;
}
@@ -158,7 +158,7 @@ struct iavf_event_handler {
{
struct iavf_event_handler *handler = &event_handler;
- if (__atomic_fetch_sub(&handler->ndev, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&handler->ndev, 1, rte_memory_order_relaxed) - 1 != 0)
return;
int unused = pthread_cancel((pthread_t)handler->tid.opaque_id);
@@ -574,8 +574,8 @@ struct iavf_event_handler {
/* read message and it's expected one */
if (msg_opc == vf->pend_cmd) {
uint32_t cmd_count =
- __atomic_fetch_sub(&vf->pend_cmd_count,
- 1, __ATOMIC_RELAXED) - 1;
+ rte_atomic_fetch_sub_explicit(&vf->pend_cmd_count,
+ 1, rte_memory_order_relaxed) - 1;
if (cmd_count == 0)
_notify_cmd(vf, msg_ret);
} else {
--
1.8.3.1
* [PATCH v6 04/45] net/ice: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (2 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 03/45] net/iavf: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 05/45] net/i40e: " Tyler Retzlaff
` (41 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
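A minimal sketch (not the driver code itself) of the 64-bit link word update
converted here. The driver publishes the whole rte_eth_link word with a strong
CAS against its own destination, and the patch keeps that construct, adding a
cast to an __rte_atomic-qualified pointer so the call type-checks with the
stdatomic API. Names are illustrative and assume the DPDK headers are
available.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Atomically publish 'src' into the 64-bit word at 'dst'; returns 0 on
 * success, -1 if a concurrent writer changed *dst while the CAS ran. */
static inline int
link_word_publish(uint64_t *dst, uint64_t src)
{
	if (!rte_atomic_compare_exchange_strong_explicit(
			(uint64_t __rte_atomic *)dst, dst, src,
			rte_memory_order_seq_cst, rte_memory_order_seq_cst))
		return -1;
	return 0;
}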
drivers/net/ice/base/ice_osdep.h | 4 ++--
drivers/net/ice/ice_dcf.c | 6 +++---
drivers/net/ice/ice_dcf.h | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 8 ++++----
drivers/net/ice/ice_dcf_parent.c | 16 ++++++++--------
drivers/net/ice/ice_ethdev.c | 12 ++++++------
drivers/net/ice/ice_ethdev.h | 2 +-
7 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 0e14b93..c17f1bf 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -235,7 +235,7 @@ struct ice_lock {
ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
struct ice_dma_mem *mem, u64 size)
{
- static uint64_t ice_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) ice_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -243,7 +243,7 @@ struct ice_lock {
return NULL;
snprintf(z_name, sizeof(z_name), "ice_dma_%" PRIu64,
- __atomic_fetch_add(&ice_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&ice_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
0, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7f8f516..204d4ea 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -764,7 +764,7 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
rte_spinlock_init(&hw->vc_cmd_queue_lock);
TAILQ_INIT(&hw->vc_cmd_queue);
- __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->vsi_update_thread_num, 0, rte_memory_order_relaxed);
hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
if (hw->arq_buf == NULL) {
@@ -888,8 +888,8 @@ struct virtchnl_proto_hdrs ice_dcf_inner_ipv6_sctp_tmplt = {
ice_dcf_dev_interrupt_handler, hw);
/* Wait for all `ice-thread` threads to exit. */
- while (__atomic_load_n(&hw->vsi_update_thread_num,
- __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(&hw->vsi_update_thread_num,
+ rte_memory_order_acquire) != 0)
rte_delay_ms(ICE_DCF_CHECK_INTERVAL);
ice_dcf_mode_disable(hw);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index aa2a723..7726681 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -105,7 +105,7 @@ struct ice_dcf_hw {
void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen);
- int vsi_update_thread_num;
+ RTE_ATOMIC(int) vsi_update_thread_num;
uint8_t *arq_buf;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index d58ec9d..8f3a385 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -1743,7 +1743,7 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
ice_dcf_adminq_need_retry(struct ice_adapter *ad)
{
return ad->hw.dcf_enabled &&
- !__atomic_load_n(&ad->dcf_state_on, __ATOMIC_RELAXED);
+ !rte_atomic_load_explicit(&ad->dcf_state_on, rte_memory_order_relaxed);
}
/* Add UDP tunneling port */
@@ -1944,12 +1944,12 @@ static int ice_dcf_xstats_get(struct rte_eth_dev *dev,
adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
return -1;
}
- __atomic_store_n(&parent_adapter->dcf_state_on, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true, rte_memory_order_relaxed);
if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter");
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 6e845f4..a478b69 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -123,8 +123,8 @@ struct ice_dcf_reset_event_param {
container_of(hw, struct ice_dcf_adapter, real_hw);
struct ice_adapter *parent_adapter = &adapter->parent;
- __atomic_fetch_add(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_relaxed);
rte_thread_detach(rte_thread_self());
@@ -133,8 +133,8 @@ struct ice_dcf_reset_event_param {
rte_spinlock_lock(&vsi_update_lock);
if (!ice_dcf_handle_vsi_update_event(hw)) {
- __atomic_store_n(&parent_adapter->dcf_state_on, true,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, true,
+ rte_memory_order_relaxed);
ice_dcf_update_vf_vsi_map(&adapter->parent.hw,
hw->num_vfs, hw->vf_vsi_map);
}
@@ -156,8 +156,8 @@ struct ice_dcf_reset_event_param {
free(param);
- __atomic_fetch_sub(&hw->vsi_update_thread_num, 1,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&hw->vsi_update_thread_num, 1,
+ rte_memory_order_release);
return 0;
}
@@ -269,8 +269,8 @@ struct ice_dcf_reset_event_param {
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
- __atomic_store_n(&parent_adapter->dcf_state_on, false,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&parent_adapter->dcf_state_on, false,
+ rte_memory_order_relaxed);
start_vsi_reset_thread(dcf_hw, true,
pf_msg->event_data.vf_vsi_map.vf_id);
break;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 87385d2..0f35c6a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -4062,9 +4062,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = &dev->data->dev_link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
@@ -4078,9 +4078,9 @@ static int ice_init_rss(struct ice_pf *pf)
struct rte_eth_link *src = link;
/* NOTE: review for potential ordering optimization */
- if (!__atomic_compare_exchange_n((uint64_t *)dst, (uint64_t *)dst,
- *(uint64_t *)src, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ if (!rte_atomic_compare_exchange_strong_explicit((uint64_t __rte_atomic *)dst,
+ (uint64_t *)dst, *(uint64_t *)src,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst))
return -1;
return 0;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 984479a..d73faae 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -621,7 +621,7 @@ struct ice_adapter {
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
/* True if DCF state of the associated PF is on */
- bool dcf_state_on;
+ RTE_ATOMIC(bool) dcf_state_on;
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
--
1.8.3.1
* [PATCH v6 05/45] net/i40e: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (3 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 04/45] net/ice: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 06/45] net/hns3: " Tyler Retzlaff
` (40 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
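A minimal sketch (not the driver code itself) of the descriptor read ordering
this patch converts: the acquire thread fence keeps the status/DD check ahead
of the loads of the remaining descriptor words. The struct layout and the DD
bit position below are illustrative assumptions, not the real i40e layout;
the DPDK headers are assumed to be available.

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

struct rx_desc {
	uint64_t qword0;
	uint64_t qword1; /* status word carrying the DD bit (assumed bit 0 here) */
};

/* Returns 1 and copies the descriptor out once the DD bit is observed. */
static inline int
desc_read_if_done(const volatile struct rx_desc *rxdp, struct rx_desc *out)
{
	if (!(rxdp->qword1 & 1ULL))
		return 0;
	/* Order the DD check before loading the other descriptor words. */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	out->qword0 = rxdp->qword0;
	out->qword1 = rxdp->qword1;
	return 1;
}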
drivers/net/i40e/i40e_ethdev.c | 4 ++--
drivers/net/i40e/i40e_rxtx.c | 6 +++---
drivers/net/i40e/i40e_rxtx_vec_neon.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 380ce1a..801cc95 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -4687,7 +4687,7 @@ enum i40e_status_code
u64 size,
u32 alignment)
{
- static uint64_t i40e_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) i40e_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -4695,7 +4695,7 @@ enum i40e_status_code
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
- __atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&i40e_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 5d25ab4..155f243 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -486,7 +486,7 @@
}
/* This barrier is to order loads of different words in the descriptor */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++) {
@@ -745,7 +745,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
@@ -867,7 +867,7 @@
* Use acquire fence to ensure that qword1 which includes DD
* bit is loaded before loading of other descriptor words.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
rxd = *rxdp;
nb_hold++;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index d873e30..3a99137 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -425,7 +425,7 @@
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* Use acquire fence to order loads of descriptor qwords */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* A.2 reload qword0 to make it ordered after qword1 load */
descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
--
1.8.3.1
* [PATCH v6 06/45] net/hns3: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (4 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 05/45] net/i40e: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 07/45] net/bnxt: " Tyler Retzlaff
` (39 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
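A minimal sketch (not the driver code itself) of the relaxed bit-flag helpers
this patch converts (see the hns3_atomic_* helpers in the hns3_ethdev.h hunk
below): fetch_or sets a bit, and fetch_and with the inverted mask clears the
bit while reporting, via the returned old value, whether it was set. Names are
illustrative and assume the DPDK headers are available.

#include <stdint.h>
#include <rte_stdatomic.h>

static inline void
flag_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
	rte_atomic_fetch_or_explicit(addr, 1ULL << nr, rte_memory_order_relaxed);
}

static inline uint64_t
flag_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
	uint64_t mask = 1ULL << nr;

	/* fetch_and returns the old value; keep only the bit of interest. */
	return rte_atomic_fetch_and_explicit(addr, ~mask,
					     rte_memory_order_relaxed) & mask;
}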
drivers/net/hns3/hns3_cmd.c | 18 ++++++------
drivers/net/hns3/hns3_dcb.c | 2 +-
drivers/net/hns3/hns3_ethdev.c | 36 +++++++++++------------
drivers/net/hns3/hns3_ethdev.h | 32 ++++++++++-----------
drivers/net/hns3/hns3_ethdev_vf.c | 60 +++++++++++++++++++--------------------
drivers/net/hns3/hns3_intr.c | 36 +++++++++++------------
drivers/net/hns3/hns3_intr.h | 4 +--
drivers/net/hns3/hns3_mbx.c | 6 ++--
drivers/net/hns3/hns3_mp.c | 6 ++--
drivers/net/hns3/hns3_rxtx.c | 10 +++----
drivers/net/hns3/hns3_tm.c | 4 +--
11 files changed, 107 insertions(+), 107 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 001ff49..3c5fdbe 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -44,12 +44,12 @@
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
uint64_t size, uint32_t alignment)
{
- static uint64_t hns3_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
- __atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
@@ -198,8 +198,8 @@
hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
csq->next_to_use, csq->next_to_clean);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- __atomic_store_n(&hw->reset.disable_cmd, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
+ rte_memory_order_relaxed);
hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}
@@ -313,7 +313,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (hns3_cmd_csq_done(hw))
return 0;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw,
"Don't wait for reply because of disable_cmd");
return -EBUSY;
@@ -360,7 +360,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
int retval;
uint32_t ntc;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->cmq.csq.lock);
@@ -747,7 +747,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
ret = -EBUSY;
goto err_cmd_init;
}
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
ret = hns3_cmd_query_firmware_version_and_capability(hw);
if (ret) {
@@ -790,7 +790,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
return 0;
err_cmd_init:
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return ret;
}
@@ -819,7 +819,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw)
if (!hns->is_vf)
(void)hns3_firmware_compat_config(hw, false);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
/*
* A delay is added to ensure that the register cleanup operations
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 915e4eb..2f917fe 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -648,7 +648,7 @@
* and configured directly to the hardware in the RESET_STAGE_RESTORE
* stage of the reset process.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
for (i = 0; i < hw->rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] =
i % hw->alloc_rss_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a..327f6fe 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -99,7 +99,7 @@ struct hns3_intr_state {
};
static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
@@ -134,7 +134,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B);
hw->reset.stats.imp_cnt++;
@@ -148,7 +148,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
{
struct hns3_hw *hw = &hns->hw;
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending);
*vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B);
hw->reset.stats.global_cnt++;
@@ -1151,7 +1151,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* ensure that the hardware configuration remains unchanged before and
* after reset.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hw->port_base_vlan_cfg.state = HNS3_PORT_BASE_VLAN_DISABLE;
hw->port_base_vlan_cfg.pvid = HNS3_INVALID_PVID;
}
@@ -1175,7 +1175,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* we will restore configurations to hardware in hns3_restore_vlan_table
* and hns3_restore_vlan_conf later.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
ret = hns3_vlan_pvid_configure(hns, HNS3_INVALID_PVID, 0);
if (ret) {
hns3_err(hw, "pvid set fail in pf, ret =%d", ret);
@@ -5059,7 +5059,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -5150,7 +5150,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
ret = hns3_cfg_mac_mode(hw, false);
@@ -5158,7 +5158,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
return ret;
hw->mac.link_status = RTE_ETH_LINK_DOWN;
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -5184,7 +5184,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_tm_dev_stop_proc(hw);
hns3_config_mac_tnl_int(hw, false);
hns3_stop_tqps(hw);
@@ -5577,7 +5577,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
last_req = hns3_get_reset_level(hns, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -5677,7 +5677,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
}
static enum hns3_reset_level
-hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+hns3_get_reset_level(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
struct hns3_hw *hw = &hns->hw;
enum hns3_reset_level reset_level = HNS3_NONE_RESET;
@@ -5737,7 +5737,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* any mailbox handling or command to firmware is only valid
* after hns3_cmd_init is called.
*/
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hw->reset.stats.request_cnt++;
break;
case HNS3_IMP_RESET:
@@ -5792,7 +5792,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* from table space. Hence, for function reset software intervention is
* required to delete the entries
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -5913,10 +5913,10 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
@@ -5925,7 +5925,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Check if there is any ongoing reset in the hardware. This status can
@@ -6576,7 +6576,7 @@ static int hns3_remove_mc_mac_addr(struct hns3_hw *hw,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index a4bc62a..a6b6524 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -401,17 +401,17 @@ enum hns3_schedule {
struct hns3_reset_data {
enum hns3_reset_stage stage;
- uint16_t schedule;
+ RTE_ATOMIC(uint16_t) schedule;
/* Reset flag, covering the entire reset process */
- uint16_t resetting;
+ RTE_ATOMIC(uint16_t) resetting;
/* Used to disable sending cmds during reset */
- uint16_t disable_cmd;
+ RTE_ATOMIC(uint16_t) disable_cmd;
/* The reset level being processed */
enum hns3_reset_level level;
/* Reset level set, each bit represents a reset level */
- uint64_t pending;
+ RTE_ATOMIC(uint64_t) pending;
/* Request reset level set, from interrupt or mailbox */
- uint64_t request;
+ RTE_ATOMIC(uint64_t) request;
int attempts; /* Reset failure retry */
int retries; /* Timeout failure retry in reset_post */
/*
@@ -499,7 +499,7 @@ struct hns3_hw {
* by dev_set_link_up() or dev_start().
*/
bool set_link_down;
- unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+ RTE_ATOMIC(unsigned int) secondary_cnt; /* Number of secondary processes init'd. */
struct hns3_tqp_stats tqp_stats;
/* Include Mac stats | Rx stats | Tx stats */
struct hns3_mac_stats mac_stats;
@@ -844,7 +844,7 @@ struct hns3_vf {
struct hns3_adapter *adapter;
/* Whether PF support push link status change to VF */
- uint16_t pf_push_lsc_cap;
+ RTE_ATOMIC(uint16_t) pf_push_lsc_cap;
/*
* If PF support push link status change, VF still need send request to
@@ -853,7 +853,7 @@ struct hns3_vf {
*/
uint16_t req_link_info_cnt;
- uint16_t poll_job_started; /* whether poll job is started */
+ RTE_ATOMIC(uint16_t) poll_job_started; /* whether poll job is started */
};
struct hns3_adapter {
@@ -997,32 +997,32 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
hns3_read_reg((a)->io_base, (reg))
static inline uint64_t
-hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_test_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t res;
- res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
+ res = (rte_atomic_load_explicit(addr, rte_memory_order_relaxed) & (1UL << nr)) != 0;
return res;
}
static inline void
-hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_set_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_or_explicit(addr, (1UL << nr), rte_memory_order_relaxed);
}
static inline void
-hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_atomic_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
- __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
+ rte_atomic_fetch_and_explicit(addr, ~(1UL << nr), rte_memory_order_relaxed);
}
static inline uint64_t
-hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
+hns3_test_and_clear_bit(unsigned int nr, volatile RTE_ATOMIC(uint64_t) *addr)
{
uint64_t mask = (1UL << nr);
- return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
+ return rte_atomic_fetch_and_explicit(addr, ~mask, rte_memory_order_relaxed) & mask;
}
int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a..b83d5b9 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -37,7 +37,7 @@ enum hns3vf_evt_cause {
};
static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
- uint64_t *levels);
+ RTE_ATOMIC(uint64_t) *levels);
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
@@ -484,7 +484,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* MTU value issued by hns3 VF PMD must be less than or equal to
* PF's MTU.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "Failed to set mtu during resetting");
return -EIO;
}
@@ -565,7 +565,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
val = hns3_read_dev(hw, HNS3_VF_RST_ING);
hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
@@ -634,8 +634,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
}
static void
@@ -650,8 +650,8 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
struct hns3_vf_to_pf_msg req;
- __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
+ rte_memory_order_release);
hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
@@ -666,7 +666,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* mailbox from PF driver to get this capability.
*/
hns3vf_handle_mbx_msg(hw);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
break;
remain_ms--;
@@ -677,10 +677,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* state: unknown (means pf not ack), not_supported, supported.
* Here config it as 'not_supported' when it's 'unknown' state.
*/
- __atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
+ rte_memory_order_acquire, rte_memory_order_acquire);
- if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
hns3_info(hw, "detect PF support push link status change!");
} else {
@@ -920,7 +920,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
bool send_req;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return;
send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
@@ -956,7 +956,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* sending request to PF kernel driver, then could update link status by
* process PF kernel driver's link status mailbox message.
*/
- if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
+ if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
return;
if (hw->adapter_state != HNS3_NIC_STARTED)
@@ -994,7 +994,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_hw *hw = &hns->hw;
int ret;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw,
"vf set vlan id failed during resetting, vlan_id =%u",
vlan_id);
@@ -1059,7 +1059,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
unsigned int tmp_mask;
int ret = 0;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
mask);
return -EIO;
@@ -1252,7 +1252,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
- __atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
hns3vf_service_handler(dev);
}
@@ -1264,7 +1264,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
rte_eal_alarm_cancel(hns3vf_service_handler, dev);
- __atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
}
static int
@@ -1500,10 +1500,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* during reset and is required to be released after the reset is
* completed.
*/
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
hns3_dev_release_mbufs(hns);
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
hns3_configure_all_mac_addr(hns, true);
ret = hns3_reset_all_tqps(hns);
if (ret) {
@@ -1528,7 +1528,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_stop_rxtx_datapath(dev);
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
hns3_stop_tqps(hw);
hns3vf_do_stop(hns);
hns3_unmap_rx_interrupt(dev);
@@ -1643,7 +1643,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
int ret;
PMD_INIT_FUNC_TRACE();
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
return -EBUSY;
rte_spinlock_lock(&hw->lock);
@@ -1773,7 +1773,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
if (last_req == HNS3_NONE_RESET || last_req < new_req) {
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
hns3_schedule_delayed_reset(hns);
hns3_warn(hw, "High level reset detected, delay do reset");
return true;
@@ -1847,7 +1847,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
if (ret)
return ret;
}
- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
return 0;
}
@@ -1888,7 +1888,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* from table space. Hence, for function reset software intervention is
* required to delete the entries.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
hns3_configure_all_mc_mac_addr(hns, true);
rte_spinlock_unlock(&hw->lock);
@@ -2030,7 +2030,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
}
static enum hns3_reset_level
-hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
enum hns3_reset_level reset_level;
@@ -2070,10 +2070,10 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
* The interrupt may have been lost. It is necessary to handle
* the interrupt to recover from the error.
*/
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
@@ -2082,7 +2082,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
}
}
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
/*
* Hardware reset has been notified, we now have to poll & check if
@@ -2278,7 +2278,7 @@ static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
hw->adapter_state = HNS3_NIC_INITIALIZED;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_PENDING) {
hns3_err(hw, "Reschedule reset service after dev_init");
hns3_schedule_reset(hns);
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 916bf30..26fa2eb 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2033,7 +2033,7 @@ enum hns3_hw_err_report_type {
static int
hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
- int num, uint64_t *levels,
+ int num, RTE_ATOMIC(uint64_t) *levels,
enum hns3_hw_err_report_type err_type)
{
const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
@@ -2104,7 +2104,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2151,7 +2151,7 @@ enum hns3_hw_err_report_type {
}
void
-hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
uint32_t mpf_bd_num, pf_bd_num, bd_num;
struct hns3_hw *hw = &hns->hw;
@@ -2402,7 +2402,7 @@ enum hns3_hw_err_report_type {
hw->reset.request = 0;
hw->reset.pending = 0;
hw->reset.resetting = 0;
- __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
hw->reset.wait_data = rte_zmalloc("wait_data",
sizeof(struct hns3_wait_data), 0);
if (!hw->reset.wait_data) {
@@ -2419,8 +2419,8 @@ enum hns3_hw_err_report_type {
/* Reschedule the reset process after successful initialization */
if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
+ rte_memory_order_relaxed);
return;
}
@@ -2428,15 +2428,15 @@ enum hns3_hw_err_report_type {
return;
/* Schedule restart alarm if it is not scheduled yet */
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_REQUESTED)
return;
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
SCHEDULE_DEFERRED)
rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}
@@ -2453,11 +2453,11 @@ enum hns3_hw_err_report_type {
return;
}
- if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) !=
+ if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
SCHEDULE_NONE)
return;
- __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
+ rte_memory_order_relaxed);
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}
@@ -2537,7 +2537,7 @@ enum hns3_hw_err_report_type {
}
static void
-hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels)
+hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
uint64_t merge_cnt = hw->reset.stats.merge_cnt;
uint64_t tmp;
@@ -2633,7 +2633,7 @@ enum hns3_hw_err_report_type {
* Regardless of whether the execution is successful or not, the
* flow after execution must be continued.
*/
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
(void)hns3_cmd_init(hw);
reset_fail:
hw->reset.attempts = 0;
@@ -2661,7 +2661,7 @@ enum hns3_hw_err_report_type {
int ret;
if (hw->reset.stage == RESET_STAGE_NONE) {
- __atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_DOWN;
hns3_report_reset_begin(hw);
ret = hw->reset.ops->stop_service(hns);
@@ -2750,7 +2750,7 @@ enum hns3_hw_err_report_type {
hns3_notify_reset_ready(hw, false);
hns3_clear_reset_level(hw, &hw->reset.pending);
hns3_clear_reset_status(hw);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.attempts = 0;
hw->reset.stats.success_cnt++;
hw->reset.stage = RESET_STAGE_NONE;
@@ -2812,7 +2812,7 @@ enum hns3_hw_err_report_type {
hw->reset.mbuf_deferred_free = false;
}
rte_spinlock_unlock(&hw->lock);
- __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
hw->reset.stage = RESET_STAGE_NONE;
hns3_clock_gettime(&tv);
timersub(&tv, &hw->reset.start_time, &tv_delta);
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index aca1c07..1edb07d 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -171,8 +171,8 @@ struct hns3_hw_error_desc {
};
int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en);
-void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
-void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels);
void hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en);
void hns3_handle_error(struct hns3_adapter *hns);
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index 9cdbc16..10c6e3b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -65,7 +65,7 @@
mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
while (wait_time < mbx_time_limit) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
hns3_err(hw, "Don't wait for mbx response because of "
"disable_cmd");
return -EBUSY;
@@ -382,7 +382,7 @@
rte_spinlock_lock(&hw->cmq.crq.lock);
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
@@ -457,7 +457,7 @@
}
while (!hns3_cmd_crq_empty(hw)) {
- if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
rte_spinlock_unlock(&hw->cmq.crq.lock);
return;
}
diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c
index 556f194..ba8f8ec 100644
--- a/drivers/net/hns3/hns3_mp.c
+++ b/drivers/net/hns3/hns3_mp.c
@@ -151,7 +151,7 @@
int i;
if (rte_eal_process_type() == RTE_PROC_SECONDARY ||
- __atomic_load_n(&hw->secondary_cnt, __ATOMIC_RELAXED) == 0)
+ rte_atomic_load_explicit(&hw->secondary_cnt, rte_memory_order_relaxed) == 0)
return;
if (!mp_req_type_is_valid(type)) {
@@ -277,7 +277,7 @@ void hns3_mp_req_stop_rxtx(struct rte_eth_dev *dev)
ret);
return ret;
}
- __atomic_fetch_add(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
} else {
ret = hns3_mp_init_primary();
if (ret) {
@@ -297,7 +297,7 @@ void hns3_mp_uninit(struct rte_eth_dev *dev)
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- __atomic_fetch_sub(&hw->secondary_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&hw->secondary_cnt, 1, rte_memory_order_relaxed);
process_data.eth_dev_cnt--;
if (process_data.eth_dev_cnt == 0) {
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 7e636a0..73a388b 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -4464,7 +4464,7 @@
struct hns3_adapter *hns = eth_dev->data->dev_private;
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
- __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
+ rte_atomic_load_explicit(&hns->hw.reset.resetting, rte_memory_order_relaxed) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hw->set_link_down ?
@@ -4530,7 +4530,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4586,7 +4586,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Rx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4615,7 +4615,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to start Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
@@ -4648,7 +4648,7 @@
rte_spinlock_lock(&hw->lock);
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
hns3_err(hw, "fail to stop Tx queue during resetting.");
rte_spinlock_unlock(&hw->lock);
return -EIO;
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index d969164..92a6685 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1051,7 +1051,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
/* don't goto fail_clear, user may try later */
@@ -1141,7 +1141,7 @@
if (error == NULL)
return -EINVAL;
- if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
error->message = "device is resetting";
return -EBUSY;
--
1.8.3.1
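
For readers following the series, every hns3 hunk above reduces to the same
mechanical shape: the shared variable (or the pointed-to type, for parameters
such as the reset levels bitmap) gains the RTE_ATOMIC() qualifier, and each
__atomic_* builtin becomes the matching rte_atomic_*_explicit call with the
equivalent rte_memory_order_* argument. A minimal standalone sketch of that
shape (the flag and function names are invented, not hns3 code):

  #include <rte_stdatomic.h>

  /* flag shared between the configuration path and the reset service */
  static RTE_ATOMIC(uint16_t) resetting;

  static int
  dev_configure(void)
  {
          /* was: __atomic_load_n(&resetting, __ATOMIC_RELAXED) */
          if (rte_atomic_load_explicit(&resetting, rte_memory_order_relaxed))
                  return -EBUSY;
          /* was: __atomic_store_n(&resetting, 1, __ATOMIC_RELAXED) */
          rte_atomic_store_explicit(&resetting, 1, rte_memory_order_relaxed);
          return 0;
  }

When RTE_ENABLE_STDATOMIC is defined, RTE_ATOMIC(T) expands to an
_Atomic-qualified type, which is why prototypes such as
hns3vf_get_reset_level() and hns3_handle_msix_error() change their
uint64_t *levels parameter to RTE_ATOMIC(uint64_t) * as well; without it the
macro leaves the type unchanged and the calls map back to the gcc builtins.
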
* [PATCH v6 07/45] net/bnxt: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (5 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 06/45] net/hns3: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 08/45] net/cpfl: " Tyler Retzlaff
` (38 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
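
The ordering-sensitive spots in this patch are the completion-ring valid
checks, where an acquire fence keeps loads of the completion fields from
being hoisted above the load of the valid bit; the rx_mbuf_alloc_fail counter
only needs relaxed ordering since it is a statistic. A standalone sketch of
the fence pattern (field and function names invented, not bnxt code):

  #include <stdbool.h>
  #include <stdint.h>
  #include <rte_atomic.h>
  #include <rte_stdatomic.h>

  static inline bool
  cmpl_is_valid(const volatile uint32_t *valid_word, uint32_t valid_mask,
                bool expected)
  {
          bool valid = !!(*valid_word & valid_mask);

          if (valid == expected) {
                  /* later loads of the completion fields may not be
                   * reordered before the valid-bit load above */
                  rte_atomic_thread_fence(rte_memory_order_acquire);
                  return true;
          }
          return false;
  }
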
---
drivers/net/bnxt/bnxt_cpr.h | 4 ++--
drivers/net/bnxt/bnxt_rxq.h | 2 +-
drivers/net/bnxt/bnxt_rxr.c | 13 ++++++++-----
drivers/net/bnxt/bnxt_rxtx_vec_neon.c | 2 +-
drivers/net/bnxt/bnxt_stats.c | 4 ++--
5 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index c7b3480..43f06fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -107,7 +107,7 @@ struct bnxt_cp_ring_info {
/**
* Check validity of a completion ring entry. If the entry is valid, include a
- * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * C11 rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
* completion are not hoisted by the compiler or by the CPU to come before the
* loading of the "valid" field.
*
@@ -130,7 +130,7 @@ struct bnxt_cp_ring_info {
expected = !(raw_cons & ring_size);
valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
if (valid == expected) {
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
return true;
}
return false;
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 77bc382..36e0ac3 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
struct rte_mbuf fake_mbuf;
- uint64_t rx_mbuf_alloc_fail;
+ RTE_ATOMIC(uint64_t) rx_mbuf_alloc_fail;
uint8_t need_realloc;
const struct rte_memzone *mz;
};
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 957b7d6..69e8384 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -49,7 +49,8 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
rx_buf = &rxr->rx_buf_ring[prod];
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -86,7 +87,8 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
/* If buff has failed already, setting this again won't hurt */
rxq->need_realloc = 1;
return -ENOMEM;
@@ -465,7 +467,8 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -1677,8 +1680,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+ rte_memory_order_relaxed);
return -ENOMEM;
}
}
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 775400f..04864e0 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -240,7 +240,7 @@
rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
/* Use acquire fence to order loads of descriptor words. */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
/* Reload lower 64b of descriptors to make it ordered after info3_v. */
rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
((void *)&cpr->cp_desc_ring[cons + 7],
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6a6feab..479f819 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -663,7 +663,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -724,7 +724,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
bnxt_stats->rx_nombuf +=
- __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
}
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
--
1.8.3.1
* [PATCH v6 08/45] net/cpfl: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (6 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 07/45] net/bnxt: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 09/45] net/af_xdp: " Tyler Retzlaff
` (37 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
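
mbuf_alloc_failed here is a pure statistic, so relaxed ordering is all that
is needed: the atomic accesses only guarantee a torn-free value, not any
ordering with other memory. A standalone sketch of the aggregate/reset
pattern (struct and names invented, not cpfl code):

  #include <stdint.h>
  #include <rte_stdatomic.h>

  struct rxq_stats {
          RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
  };

  static uint64_t
  stats_sum(struct rxq_stats **q, uint16_t nb_q)
  {
          uint64_t total = 0;
          uint16_t i;

          for (i = 0; i < nb_q; i++)
                  total += rte_atomic_load_explicit(&q[i]->mbuf_alloc_failed,
                                  rte_memory_order_relaxed);
          return total;
  }

  static void
  stats_reset(struct rxq_stats **q, uint16_t nb_q)
  {
          uint16_t i;

          for (i = 0; i < nb_q; i++)
                  rte_atomic_store_explicit(&q[i]->mbuf_alloc_failed, 0,
                                  rte_memory_order_relaxed);
  }
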
---
drivers/net/cpfl/cpfl_ethdev.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ef19aa1..5b47e22 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -300,8 +300,9 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed +=
+ rte_atomic_load_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -349,7 +350,8 @@ struct rte_cpfl_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
- __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
* [PATCH v6 09/45] net/af_xdp: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (7 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 08/45] net/cpfl: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 10/45] net/octeon_ep: " Tyler Retzlaff
` (36 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
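
The refcnt converted here guards a UMEM that can be shared between queues,
and the patch keeps the driver's original acquire ordering on the
increment/decrement. A standalone sketch of the release path (names invented;
destroy_umem() is a hypothetical destructor standing in for the real
teardown):

  #include <stdint.h>
  #include <rte_stdatomic.h>

  struct shared_umem {
          RTE_ATOMIC(uint8_t) refcnt;
          /* buffers, rings, ... */
  };

  static void destroy_umem(struct shared_umem *umem); /* hypothetical */

  static void
  umem_put(struct shared_umem *umem)
  {
          /* fetch_sub returns the old value; the caller that drops the
           * last reference performs the teardown */
          if (rte_atomic_fetch_sub_explicit(&umem->refcnt, 1,
                          rte_memory_order_acquire) - 1 == 0)
                  destroy_umem(umem);
  }
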
---
drivers/net/af_xdp/rte_eth_af_xdp.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 268a130..4833180 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -116,7 +116,7 @@ struct xsk_umem_info {
const struct rte_memzone *mz;
struct rte_mempool *mb_pool;
void *buffer;
- uint8_t refcnt;
+ RTE_ATOMIC(uint8_t) refcnt;
uint32_t max_xsks;
};
@@ -995,7 +995,8 @@ static int link_xdp_prog_with_dev(int ifindex, int fd, __u32 flags)
break;
xsk_socket__delete(rxq->xsk);
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1,
+ rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
/* free pkt_tx_queue */
@@ -1097,8 +1098,8 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
ret = -1;
goto out;
}
- if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
- __ATOMIC_ACQUIRE)) {
+ if (rte_atomic_load_explicit(&internals->rx_queues[i].umem->refcnt,
+ rte_memory_order_acquire)) {
*umem = internals->rx_queues[i].umem;
goto out;
}
@@ -1131,11 +1132,11 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
return NULL;
if (umem != NULL &&
- __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
+ rte_atomic_load_explicit(&umem->refcnt, rte_memory_order_acquire) <
umem->max_xsks) {
AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
internals->if_name, rxq->xsk_queue_idx);
- __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&umem->refcnt, 1, rte_memory_order_acquire);
}
}
@@ -1177,7 +1178,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
mb_pool->name, umem->max_xsks);
}
- __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&umem->refcnt, 1, rte_memory_order_release);
}
return umem;
@@ -1606,7 +1607,8 @@ struct msg_internal {
if (rxq->umem == NULL)
return -ENOMEM;
txq->umem = rxq->umem;
- reserve_before = __atomic_load_n(&rxq->umem->refcnt, __ATOMIC_ACQUIRE) <= 1;
+ reserve_before = rte_atomic_load_explicit(&rxq->umem->refcnt,
+ rte_memory_order_acquire) <= 1;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
@@ -1723,7 +1725,7 @@ struct msg_internal {
out_xsk:
xsk_socket__delete(rxq->xsk);
out_umem:
- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0)
+ if (rte_atomic_fetch_sub_explicit(&rxq->umem->refcnt, 1, rte_memory_order_acquire) - 1 == 0)
xdp_umem_destroy(rxq->umem);
return ret;
--
1.8.3.1
* [PATCH v6 10/45] net/octeon_ep: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (8 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 09/45] net/af_xdp: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 11/45] net/octeontx: " Tyler Retzlaff
` (35 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
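
Besides the straight load conversions, this patch also changes the type of
the ISM counter pointers: the counter lives in a raw memzone that the device
updates, so the pointer is cast to the __rte_atomic-qualified element type
before rte_atomic_load_explicit() can be applied to it. A standalone sketch
(names invented, not octeon_ep code):

  #include <stddef.h>
  #include <stdint.h>
  #include <rte_stdatomic.h>

  /* poll a device-updated counter placed 'off' bytes into a DMA buffer
   * until it reaches at least 'target' */
  static uint32_t
  poll_ism_counter(void *ism_base, size_t off, uint32_t target)
  {
          RTE_ATOMIC(uint32_t) *cnt =
                  (uint32_t __rte_atomic *)((uint8_t *)ism_base + off);
          uint32_t val;

          do {
                  val = rte_atomic_load_explicit(cnt,
                                  rte_memory_order_relaxed);
          } while (val < target);

          return val;
  }

The added (void *)(uintptr_t) casts in the log messages are presumably just
there to drop the atomic qualifier when printing the pointer value.
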
---
drivers/net/octeon_ep/cnxk_ep_rx.h | 5 +++--
drivers/net/octeon_ep/cnxk_ep_tx.c | 5 +++--
drivers/net/octeon_ep/cnxk_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx2_ep_vf.c | 8 ++++----
drivers/net/octeon_ep/otx_ep_common.h | 4 ++--
drivers/net/octeon_ep/otx_ep_rxtx.c | 6 ++++--
6 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.h b/drivers/net/octeon_ep/cnxk_ep_rx.h
index ecf95cd..9422042 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.h
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.h
@@ -98,7 +98,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(droq->pkts_sent_ism, rte_memory_order_relaxed);
new_pkts = val - droq->pkts_sent_prev;
droq->pkts_sent_prev = val;
@@ -111,7 +111,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_tx.c b/drivers/net/octeon_ep/cnxk_ep_tx.c
index 233c8aa..e093140 100644
--- a/drivers/net/octeon_ep/cnxk_ep_tx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_tx.c
@@ -15,7 +15,7 @@
* This adds an extra local variable, but almost halves the
* number of PCIe writes.
*/
- val = __atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED);
+ val = rte_atomic_load_explicit(iq->inst_cnt_ism, rte_memory_order_relaxed);
iq->inst_cnt += val - iq->inst_cnt_prev;
iq->inst_cnt_prev = val;
@@ -27,7 +27,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 39f357e..39b28de 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -150,10 +150,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
- (void *)iq->inst_cnt_ism, ism_addr);
+ (void *)(uintptr_t)iq->inst_cnt_ism, ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
@@ -235,10 +235,10 @@
rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
CNXK_EP_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ CNXK_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
- oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ oq_no, (void *)(uintptr_t)droq->pkts_sent_ism, ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 25e0e5a..2aeebb4 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -300,10 +300,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS_ISM(iq_no));
iq->inst_cnt_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_IQ_ISM_OFFSET(iq_no));
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
- (void *)iq->inst_cnt_ism,
+ (void *)(uintptr_t)iq->inst_cnt_ism,
(unsigned int)ism_addr);
*iq->inst_cnt_ism = 0;
iq->inst_cnt_prev = 0;
@@ -386,10 +386,10 @@ static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_CNTS_ISM(oq_no));
droq->pkts_sent_ism =
- (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ (uint32_t __rte_atomic *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ OTX2_EP_OQ_ISM_OFFSET(oq_no));
otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
- (void *)droq->pkts_sent_ism,
+ (void *)(uintptr_t)droq->pkts_sent_ism,
(unsigned int)ism_addr);
*droq->pkts_sent_ism = 0;
droq->pkts_sent_prev = 0;
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 7776940..73eb0c9 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -218,7 +218,7 @@ struct otx_ep_iq_config {
*/
struct otx_ep_instr_queue {
/* Location in memory updated by SDP ISM */
- uint32_t *inst_cnt_ism;
+ RTE_ATOMIC(uint32_t) *inst_cnt_ism;
struct rte_mbuf **mbuf_list;
/* Pointer to the Virtual Base addr of the input ring. */
uint8_t *base_addr;
@@ -413,7 +413,7 @@ struct otx_ep_droq {
uint8_t ism_ena;
/* Pointer to host memory copy of output packet count, set by ISM */
- uint32_t *pkts_sent_ism;
+ RTE_ATOMIC(uint32_t) *pkts_sent_ism;
uint32_t pkts_sent_prev;
/* Statistics for this DROQ. */
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 59144e0..eb2d8c1 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -475,7 +475,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
- while (__atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(iq->inst_cnt_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
rte_mb();
}
@@ -871,7 +872,8 @@
rte_mb();
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- while (__atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED) >= val) {
+ while (rte_atomic_load_explicit(droq->pkts_sent_ism,
+ rte_memory_order_relaxed) >= val) {
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
--
1.8.3.1
* [PATCH v6 11/45] net/octeontx: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (9 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 10/45] net/octeon_ep: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 12/45] net/cxgbe: " Tyler Retzlaff
` (34 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/octeontx/octeontx_ethdev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index e397136..3c21540 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -31,7 +31,7 @@
/* Useful in stopping/closing event device if no of
* eth ports are using it.
*/
-uint16_t evdev_refcnt;
+RTE_ATOMIC(uint16_t) evdev_refcnt;
#define OCTEONTX_QLM_MODE_SGMII 7
#define OCTEONTX_QLM_MODE_XFI 12
@@ -559,7 +559,7 @@ enum octeontx_link_speed {
return 0;
/* Stopping/closing event device once all eth ports are closed. */
- if (__atomic_fetch_sub(&evdev_refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&evdev_refcnt, 1, rte_memory_order_acquire) - 1 == 0) {
rte_event_dev_stop(nic->evdev);
rte_event_dev_close(nic->evdev);
}
@@ -1593,7 +1593,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
nic->pko_vfid = pko_vfid;
nic->port_id = port;
nic->evdev = evdev;
- __atomic_fetch_add(&evdev_refcnt, 1, __ATOMIC_ACQUIRE);
+ rte_atomic_fetch_add_explicit(&evdev_refcnt, 1, rte_memory_order_acquire);
res = octeontx_port_open(nic);
if (res < 0)
@@ -1844,7 +1844,7 @@ static void build_xstat_names(struct rte_eth_xstat_name *xstat_names)
}
}
- __atomic_store_n(&evdev_refcnt, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&evdev_refcnt, 0, rte_memory_order_release);
/*
* Do 1:1 links for ports & queues. All queues would be mapped to
* one port. If there are more ports than queues, then some ports
--
1.8.3.1
* [PATCH v6 12/45] net/cxgbe: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (10 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 11/45] net/octeontx: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 13/45] net/gve: " Tyler Retzlaff
` (33 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
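
The cxgbe tables touched here (CLIP, L2T, SMT, MPS TCAM) all use the same
idiom: a per-entry lock serialises allocation and teardown, so the reference
count itself only needs relaxed atomics. A standalone sketch of that idiom
(names invented, not cxgbe code):

  #include <stdint.h>
  #include <rte_spinlock.h>
  #include <rte_stdatomic.h>

  struct tbl_entry {
          rte_spinlock_t lock;
          RTE_ATOMIC(uint32_t) refcnt;
  };

  static void
  entry_get(struct tbl_entry *e)
  {
          rte_spinlock_lock(&e->lock);
          if (rte_atomic_load_explicit(&e->refcnt,
                          rte_memory_order_relaxed) == 0) {
                  /* first user: program the hardware entry here, then
                   * record the initial reference */
                  rte_atomic_store_explicit(&e->refcnt, 1,
                                  rte_memory_order_relaxed);
          } else {
                  rte_atomic_fetch_add_explicit(&e->refcnt, 1,
                                  rte_memory_order_relaxed);
          }
          rte_spinlock_unlock(&e->lock);
  }
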
---
drivers/net/cxgbe/clip_tbl.c | 12 ++++++------
drivers/net/cxgbe/clip_tbl.h | 2 +-
drivers/net/cxgbe/cxgbe_main.c | 20 ++++++++++----------
drivers/net/cxgbe/cxgbe_ofld.h | 6 +++---
drivers/net/cxgbe/l2t.c | 12 ++++++------
drivers/net/cxgbe/l2t.h | 2 +-
drivers/net/cxgbe/mps_tcam.c | 21 +++++++++++----------
drivers/net/cxgbe/mps_tcam.h | 2 +-
drivers/net/cxgbe/smt.c | 12 ++++++------
drivers/net/cxgbe/smt.h | 2 +-
10 files changed, 46 insertions(+), 45 deletions(-)
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
index b709e26..8588b88 100644
--- a/drivers/net/cxgbe/clip_tbl.c
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -55,7 +55,7 @@ void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
int ret;
t4_os_lock(&ce->lock);
- if (__atomic_fetch_sub(&ce->refcnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ce->refcnt, 1, rte_memory_order_relaxed) - 1 == 0) {
ret = clip6_release_mbox(dev, ce->addr);
if (ret)
dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
@@ -79,7 +79,7 @@ static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
unsigned int clipt_size = c->clipt_size;
for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -114,12 +114,12 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce = find_or_alloc_clipe(ctbl, lip);
if (ce) {
t4_os_lock(&ce->lock);
- if (__atomic_load_n(&ce->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&ce->refcnt, rte_memory_order_relaxed) == 0) {
rte_memcpy(ce->addr, lip, sizeof(ce->addr));
if (v6) {
ce->type = FILTER_TYPE_IPV6;
- __atomic_store_n(&ce->refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ce->refcnt, 1,
+ rte_memory_order_relaxed);
ret = clip6_get_mbox(dev, lip);
if (ret)
dev_debug(adap,
@@ -129,7 +129,7 @@ static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
ce->type = FILTER_TYPE_IPV4;
}
} else {
- __atomic_fetch_add(&ce->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&ce->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&ce->lock);
}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
index 3b2be66..439fcf6 100644
--- a/drivers/net/cxgbe/clip_tbl.h
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -13,7 +13,7 @@ struct clip_entry {
enum filter_type type; /* entry type */
u32 addr[4]; /* IPV4 or IPV6 address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct clip_tbl {
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index c479454..2ed21f2 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -418,15 +418,15 @@ void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
- __atomic_fetch_sub(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_sub(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
}
@@ -448,15 +448,15 @@ void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
t->tid_tab[tid] = data;
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->hash_tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->hash_tids_in_use, 1,
+ rte_memory_order_relaxed);
} else {
if (family == FILTER_TYPE_IPV4)
- __atomic_fetch_add(&t->tids_in_use, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->tids_in_use, 1,
+ rte_memory_order_relaxed);
}
- __atomic_fetch_add(&t->conns_in_use, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&t->conns_in_use, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index 7a4e30d..fd1e7d8 100644
--- a/drivers/net/cxgbe/cxgbe_ofld.h
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -60,10 +60,10 @@ struct tid_info {
unsigned int atids_in_use;
/* TIDs in the TCAM */
- u32 tids_in_use;
+ RTE_ATOMIC(u32) tids_in_use;
/* TIDs in the HASH */
- u32 hash_tids_in_use;
- u32 conns_in_use;
+ RTE_ATOMIC(u32) hash_tids_in_use;
+ RTE_ATOMIC(u32) conns_in_use;
alignas(RTE_CACHE_LINE_SIZE) rte_spinlock_t atid_lock;
rte_spinlock_t ftid_lock;
diff --git a/drivers/net/cxgbe/l2t.c b/drivers/net/cxgbe/l2t.c
index 21f4019..ecb5fec 100644
--- a/drivers/net/cxgbe/l2t.c
+++ b/drivers/net/cxgbe/l2t.c
@@ -14,8 +14,8 @@
*/
void cxgbe_l2t_release(struct l2t_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -112,7 +112,7 @@ static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
struct l2t_entry *first_free = NULL;
for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -151,18 +151,18 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)
dev_debug(adap, "Failed to write L2T entry: %d",
ret);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
diff --git a/drivers/net/cxgbe/l2t.h b/drivers/net/cxgbe/l2t.h
index e4c0ebe..67d0197 100644
--- a/drivers/net/cxgbe/l2t.h
+++ b/drivers/net/cxgbe/l2t.h
@@ -30,7 +30,7 @@ struct l2t_entry {
u8 lport; /* destination port */
u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
- u32 refcnt; /* entry reference count */
+ RTE_ATOMIC(u32) refcnt; /* entry reference count */
};
struct l2t_data {
diff --git a/drivers/net/cxgbe/mps_tcam.c b/drivers/net/cxgbe/mps_tcam.c
index 8e0da9c..79a7daa 100644
--- a/drivers/net/cxgbe/mps_tcam.c
+++ b/drivers/net/cxgbe/mps_tcam.c
@@ -76,7 +76,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
t4_os_write_lock(&mpstcam->lock);
entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
if (entry) {
- __atomic_fetch_add(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
t4_os_write_unlock(&mpstcam->lock);
return entry->idx;
}
@@ -98,7 +98,7 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
entry = &mpstcam->entry[ret];
memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
if (cxgbe_update_free_idx(mpstcam))
@@ -147,7 +147,7 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
* provided value is -1
*/
if (entry->state == MPS_ENTRY_UNUSED) {
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_USED;
}
@@ -165,7 +165,7 @@ static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
entry->state = MPS_ENTRY_UNUSED;
}
@@ -190,12 +190,13 @@ int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
return -EINVAL;
}
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
entry->mask, idx, 1, pi->port_id,
false);
else
- ret = __atomic_fetch_sub(&entry->refcnt, 1, __ATOMIC_RELAXED) - 1;
+ ret = rte_atomic_fetch_sub_explicit(&entry->refcnt, 1,
+ rte_memory_order_relaxed) - 1;
if (ret == 0) {
reset_mpstcam_entry(entry);
@@ -222,7 +223,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) == 1)
goto out_unlock;
ret = t4_alloc_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -231,7 +232,7 @@ int cxgbe_mpstcam_rawf_enable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 1, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
@@ -253,7 +254,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
t4_os_write_lock(&t->lock);
rawf_idx = adap->params.rawf_start + pi->port_id;
entry = &t->entry[rawf_idx];
- if (__atomic_load_n(&entry->refcnt, __ATOMIC_RELAXED) != 1)
+ if (rte_atomic_load_explicit(&entry->refcnt, rte_memory_order_relaxed) != 1)
goto out_unlock;
ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
@@ -262,7 +263,7 @@ int cxgbe_mpstcam_rawf_disable(struct port_info *pi)
if (ret < 0)
goto out_unlock;
- __atomic_store_n(&entry->refcnt, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&entry->refcnt, 0, rte_memory_order_relaxed);
out_unlock:
t4_os_write_unlock(&t->lock);
diff --git a/drivers/net/cxgbe/mps_tcam.h b/drivers/net/cxgbe/mps_tcam.h
index 363786b..4b421f7 100644
--- a/drivers/net/cxgbe/mps_tcam.h
+++ b/drivers/net/cxgbe/mps_tcam.h
@@ -29,7 +29,7 @@ struct mps_tcam_entry {
u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
};
struct mpstcam_table {
diff --git a/drivers/net/cxgbe/smt.c b/drivers/net/cxgbe/smt.c
index 4e14a73..2f961c1 100644
--- a/drivers/net/cxgbe/smt.c
+++ b/drivers/net/cxgbe/smt.c
@@ -119,7 +119,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end, *first_free = NULL;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
if (!first_free)
first_free = e;
} else {
@@ -156,7 +156,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
e = find_or_alloc_smte(s, smac);
if (e) {
t4_os_lock(&e->lock);
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
e->pfvf = pfvf;
rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
ret = write_smt_entry(dev, e);
@@ -168,9 +168,9 @@ static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
goto out_write_unlock;
}
e->state = SMT_STATE_SWITCHING;
- __atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
} else {
- __atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
t4_os_unlock(&e->lock);
}
@@ -195,8 +195,8 @@ struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
void cxgbe_smt_release(struct smt_entry *e)
{
- if (__atomic_load_n(&e->refcnt, __ATOMIC_RELAXED) != 0)
- __atomic_fetch_sub(&e->refcnt, 1, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
+ rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}
/**
diff --git a/drivers/net/cxgbe/smt.h b/drivers/net/cxgbe/smt.h
index 531810e..8b378ae 100644
--- a/drivers/net/cxgbe/smt.h
+++ b/drivers/net/cxgbe/smt.h
@@ -23,7 +23,7 @@ struct smt_entry {
u16 pfvf;
u16 hw_idx;
u8 src_mac[RTE_ETHER_ADDR_LEN];
- u32 refcnt;
+ RTE_ATOMIC(u32) refcnt;
rte_spinlock_t lock;
};
--
1.8.3.1
* [PATCH v6 13/45] net/gve: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (11 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 12/45] net/cxgbe: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 14/45] net/memif: " Tyler Retzlaff
` (32 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
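
The only atomic in gve_osdep.h is a counter used to generate unique memzone
names; relaxed ordering is sufficient because nothing beyond the uniqueness
of the returned value matters. A standalone sketch (names invented):

  #include <stdint.h>
  #include <stdio.h>
  #include <rte_stdatomic.h>

  static void
  make_unique_zone_name(char *buf, size_t len)
  {
          static RTE_ATOMIC(uint16_t) seq;

          /* each caller, on any thread, gets a distinct sequence number */
          snprintf(buf, len, "dma_zone_%u",
                   (unsigned int)rte_atomic_fetch_add_explicit(&seq, 1,
                                   rte_memory_order_relaxed));
  }
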
---
drivers/net/gve/base/gve_osdep.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/gve/base/gve_osdep.h b/drivers/net/gve/base/gve_osdep.h
index a3702f4..c0ee0d5 100644
--- a/drivers/net/gve/base/gve_osdep.h
+++ b/drivers/net/gve/base/gve_osdep.h
@@ -135,7 +135,7 @@ struct gve_dma_mem {
static inline void *
gve_alloc_dma_mem(struct gve_dma_mem *mem, u64 size)
{
- static uint16_t gve_dma_memzone_id;
+ static RTE_ATOMIC(uint16_t) gve_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -143,7 +143,7 @@ struct gve_dma_mem {
return NULL;
snprintf(z_name, sizeof(z_name), "gve_dma_%u",
- __atomic_fetch_add(&gve_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&gve_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG,
PAGE_SIZE);
--
1.8.3.1
* [PATCH v6 14/45] net/memif: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (12 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 13/45] net/gve: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 15/45] net/thunderx: " Tyler Retzlaff
` (31 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/memif/memif.h | 4 ++--
drivers/net/memif/rte_eth_memif.c | 50 +++++++++++++++++++--------------------
2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/net/memif/memif.h b/drivers/net/memif/memif.h
index f5a4693..3f5b407 100644
--- a/drivers/net/memif/memif.h
+++ b/drivers/net/memif/memif.h
@@ -169,9 +169,9 @@ typedef struct __rte_packed __rte_aligned(128)
uint32_t cookie; /**< MEMIF_COOKIE */
uint16_t flags; /**< flags */
#define MEMIF_RING_FLAG_MASK_INT 1 /**< disable interrupt mode */
- uint16_t head; /**< pointer to ring buffer head */
+ RTE_ATOMIC(uint16_t) head; /**< pointer to ring buffer head */
MEMIF_CACHELINE_ALIGN_MARK(cacheline1);
- uint16_t tail; /**< pointer to ring buffer tail */
+ RTE_ATOMIC(uint16_t) tail; /**< pointer to ring buffer tail */
MEMIF_CACHELINE_ALIGN_MARK(cacheline2);
memif_desc_t desc[0]; /**< buffer descriptors */
} memif_ring_t;
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index 18377d9..16da22b 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -262,7 +262,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* in function eth_memif_rx for C2S queues.
*/
- cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ cur_tail = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
while (mq->last_tail != cur_tail) {
RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]);
rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);
@@ -334,10 +334,10 @@ struct mp_region_msg {
if (type == MEMIF_RING_C2S) {
cur_slot = mq->last_head;
- last_slot = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire);
} else {
cur_slot = mq->last_tail;
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
}
if (cur_slot == last_slot)
@@ -473,7 +473,7 @@ struct mp_region_msg {
no_free_bufs:
if (type == MEMIF_RING_C2S) {
- __atomic_store_n(&ring->tail, cur_slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, cur_slot, rte_memory_order_release);
mq->last_head = cur_slot;
} else {
mq->last_tail = cur_slot;
@@ -485,7 +485,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
while (n_slots--) {
@@ -493,7 +493,7 @@ struct mp_region_msg {
d0 = &ring->desc[s0];
d0->length = pmd->run.pkt_buffer_size;
}
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
}
mq->n_pkts += n_rx_pkts;
@@ -541,7 +541,7 @@ struct mp_region_msg {
* threads, so using load-acquire pairs with store-release
* to synchronize it between threads.
*/
- last_slot = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ last_slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
if (cur_slot == last_slot)
goto refill;
n_slots = last_slot - cur_slot;
@@ -591,7 +591,7 @@ struct mp_region_msg {
* is called in the context of receiver thread. The loads in
* the receiver do not need to synchronize with its own stores.
*/
- head = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ head = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_slots = ring_size - head + mq->last_tail;
if (n_slots < 32)
@@ -620,7 +620,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_tx.
*/
- __atomic_store_n(&ring->head, head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, head, rte_memory_order_release);
mq->n_pkts += n_rx_pkts;
@@ -668,9 +668,9 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot +
- __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE);
+ rte_atomic_load_explicit(&ring->tail, rte_memory_order_acquire);
} else {
/* For S2C queues ring->tail is updated by the sender and
* this function is called in the context of sending thread.
@@ -678,8 +678,8 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->tail, __ATOMIC_RELAXED);
- n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot;
+ slot = rte_atomic_load_explicit(&ring->tail, rte_memory_order_relaxed);
+ n_free = rte_atomic_load_explicit(&ring->head, rte_memory_order_acquire) - slot;
}
uint16_t i;
@@ -792,9 +792,9 @@ struct mp_region_msg {
no_free_slots:
if (type == MEMIF_RING_C2S)
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
else
- __atomic_store_n(&ring->tail, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->tail, slot, rte_memory_order_release);
if (((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) &&
(rte_intr_fd_get(mq->intr_handle) >= 0)) {
@@ -882,7 +882,7 @@ struct mp_region_msg {
* its own stores. Hence, the following load can be a
* relaxed load.
*/
- slot = __atomic_load_n(&ring->head, __ATOMIC_RELAXED);
+ slot = rte_atomic_load_explicit(&ring->head, rte_memory_order_relaxed);
n_free = ring_size - slot + mq->last_tail;
int used_slots;
@@ -942,7 +942,7 @@ struct mp_region_msg {
* threads, so using store-release pairs with load-acquire
* in function eth_memif_rx for C2S rings.
*/
- __atomic_store_n(&ring->head, slot, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&ring->head, slot, rte_memory_order_release);
/* Send interrupt, if enabled. */
if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
@@ -1155,8 +1155,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_c2s_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_C2S, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1175,8 +1175,8 @@ struct mp_region_msg {
for (i = 0; i < pmd->run.num_s2c_rings; i++) {
ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2C, i);
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
ring->cookie = MEMIF_COOKIE;
ring->flags = 0;
@@ -1314,8 +1314,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
@@ -1330,8 +1330,8 @@ struct mp_region_msg {
MIF_LOG(ERR, "Wrong ring");
return -1;
}
- __atomic_store_n(&ring->head, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ring->tail, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&ring->head, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ring->tail, 0, rte_memory_order_relaxed);
mq->last_head = 0;
mq->last_tail = 0;
/* enable polling mode */
--
1.8.3.1
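The memif hunks above keep the driver's acquire/release protocol on the ring indices and only change how it is spelled. A minimal sketch of that protocol on a hypothetical single-producer ring, assuming <rte_stdatomic.h>, could look like:

#include <stdint.h>
#include <rte_stdatomic.h>

#define DEMO_RING_SIZE 256

struct demo_ring {
	RTE_ATOMIC(uint16_t) head;	/* written by the producer */
	RTE_ATOMIC(uint16_t) tail;	/* written by the consumer */
	void *slots[DEMO_RING_SIZE];
};

/* Producer: fill the slot first, then publish it with a release store
 * so a consumer that acquires 'head' also sees the slot contents. */
static void
demo_enqueue(struct demo_ring *r, void *obj)
{
	uint16_t head = rte_atomic_load_explicit(&r->head, rte_memory_order_relaxed);

	r->slots[head & (DEMO_RING_SIZE - 1)] = obj;
	rte_atomic_store_explicit(&r->head, head + 1, rte_memory_order_release);
}

/* Consumer: the acquire load pairs with the producer's release store. */
static uint16_t
demo_count_new(struct demo_ring *r, uint16_t last_seen)
{
	return rte_atomic_load_explicit(&r->head, rte_memory_order_acquire) - last_seen;
}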
* [PATCH v6 15/45] net/thunderx: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (13 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 14/45] net/memif: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 16/45] net/virtio: " Tyler Retzlaff
` (30 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/thunderx/nicvf_rxtx.c | 9 +++++----
drivers/net/thunderx/nicvf_struct.h | 4 ++--
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 74f43b9..76b6fdb 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -374,8 +374,8 @@ static const alignas(RTE_CACHE_LINE_SIZE) uint32_t ptype_table[16][16] = {
NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
(nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));
- next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
- __ATOMIC_ACQUIRE);
+ next_tail = rte_atomic_fetch_add_explicit(&rbdr->next_tail, to_fill,
+ rte_memory_order_acquire);
ltail = next_tail;
for (i = 0; i < to_fill; i++) {
struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);
@@ -385,9 +385,10 @@ static const alignas(RTE_CACHE_LINE_SIZE) uint32_t ptype_table[16][16] = {
ltail++;
}
- rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&rbdr->tail, next_tail,
+ rte_memory_order_relaxed);
- __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&rbdr->tail, ltail, rte_memory_order_release);
nicvf_addr_write(door, to_fill);
return to_fill;
}
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index cfcd942..60d3ec0 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -20,8 +20,8 @@ struct __rte_cache_aligned nicvf_rbdr {
struct rbdr_entry_t *desc;
nicvf_iova_addr_t phys;
uint32_t buffsz;
- uint32_t tail;
- uint32_t next_tail;
+ RTE_ATOMIC(uint32_t) tail;
+ RTE_ATOMIC(uint32_t) next_tail;
uint32_t head;
uint32_t qlen_mask;
};
--
1.8.3.1
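Two details in the thunderx patch above deserve a note: slots are reserved with an acquiring fetch-add, and rte_wait_until_equal_32() still takes a plain uint32_t pointer, which is why the RTE_ATOMIC()-qualified field is cast through uintptr_t. A reduced sketch of the same ticket-style publication, with hypothetical names, could look like:

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) demo_next_tail;	/* reservation counter */
static RTE_ATOMIC(uint32_t) demo_tail;		/* publication counter */

static void
demo_fill_slots(uint32_t n)
{
	uint32_t turn = rte_atomic_fetch_add_explicit(&demo_next_tail, n,
						      rte_memory_order_acquire);

	/* Wait until all earlier reservations were published; the cast is
	 * needed because the helper expects a plain uint32_t pointer. */
	rte_wait_until_equal_32((uint32_t *)(uintptr_t)&demo_tail, turn,
				rte_memory_order_relaxed);

	/* ... write the n descriptors for [turn, turn + n) here ... */

	rte_atomic_store_explicit(&demo_tail, turn + n, rte_memory_order_release);
}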
* [PATCH v6 16/45] net/virtio: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (14 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 15/45] net/thunderx: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 17/45] net/hinic: " Tyler Retzlaff
` (29 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/virtio/virtio_ring.h | 4 +--
drivers/net/virtio/virtio_user/virtio_user_dev.c | 12 ++++-----
drivers/net/virtio/virtqueue.h | 32 ++++++++++++------------
3 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index e848c0b..2a25751 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -59,7 +59,7 @@ struct vring_used_elem {
struct vring_used {
uint16_t flags;
- uint16_t idx;
+ RTE_ATOMIC(uint16_t) idx;
struct vring_used_elem ring[];
};
@@ -70,7 +70,7 @@ struct vring_packed_desc {
uint64_t addr;
uint32_t len;
uint16_t id;
- uint16_t flags;
+ RTE_ATOMIC(uint16_t) flags;
};
#define RING_EVENT_FLAGS_ENABLE 0x0
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4fdfe70..24e2b2c 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -948,7 +948,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+ uint16_t flags = rte_atomic_load_explicit(&desc->flags, rte_memory_order_acquire);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -1037,8 +1037,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vring->desc[vq->used_idx].flags, flags,
+ rte_memory_order_release);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {
@@ -1057,9 +1057,9 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
struct vring *vring = &dev->vrings.split[queue_idx];
/* Consume avail ring, using used ring idx as first one */
- while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ while (rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
!= vring->avail->idx) {
- avail_idx = __atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
+ avail_idx = rte_atomic_load_explicit(&vring->used->idx, rte_memory_order_relaxed)
& (vring->num - 1);
desc_idx = vring->avail->ring[avail_idx];
@@ -1070,7 +1070,7 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
uep->id = desc_idx;
uep->len = n_descs;
- __atomic_fetch_add(&vring->used->idx, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&vring->used->idx, 1, rte_memory_order_relaxed);
}
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 75d70f1..60211a4 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -37,7 +37,7 @@
virtio_mb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
else
rte_mb();
}
@@ -46,7 +46,7 @@
virtio_rmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
else
rte_io_rmb();
}
@@ -55,7 +55,7 @@
virtio_wmb(uint8_t weak_barriers)
{
if (weak_barriers)
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
else
rte_io_wmb();
}
@@ -67,12 +67,12 @@
uint16_t flags;
if (weak_barriers) {
-/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports
+/* x86 prefers to using rte_io_rmb over rte_atomic_load_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+ flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
#else
flags = dp->flags;
rte_io_rmb();
@@ -90,12 +90,12 @@
uint16_t flags, uint8_t weak_barriers)
{
if (weak_barriers) {
-/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports
+/* x86 prefers to using rte_io_wmb over rte_atomic_store_explicit as it reports
* a better perf(~1.5%), which comes from the saved branch by the compiler.
* The if and else branch are identical on the platforms except Arm.
*/
#ifdef RTE_ARCH_ARM
- __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
#else
rte_io_wmb();
dp->flags = flags;
@@ -425,7 +425,7 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
if (vq->hw->weak_barriers) {
/**
- * x86 prefers to using rte_smp_rmb over __atomic_load_n as it
+ * x86 prefers to using rte_smp_rmb over rte_atomic_load_explicit as it
* reports a slightly better perf, which comes from the saved
* branch by the compiler.
* The if and else branches are identical with the smp and io
@@ -435,8 +435,8 @@ struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
idx = vq->vq_split.ring.used->idx;
rte_smp_rmb();
#else
- idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx,
- __ATOMIC_ACQUIRE);
+ idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx,
+ rte_memory_order_acquire);
#endif
} else {
idx = vq->vq_split.ring.used->idx;
@@ -454,7 +454,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
vq_update_avail_idx(struct virtqueue *vq)
{
if (vq->hw->weak_barriers) {
- /* x86 prefers to using rte_smp_wmb over __atomic_store_n as
+ /* x86 prefers to using rte_smp_wmb over rte_atomic_store_explicit as
* it reports a slightly better perf, which comes from the
* saved branch by the compiler.
* The if and else branches are identical with the smp and
@@ -464,8 +464,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
rte_smp_wmb();
vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
#else
- __atomic_store_n(&vq->vq_split.ring.avail->idx,
- vq->vq_avail_idx, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&vq->vq_split.ring.avail->idx,
+ vq->vq_avail_idx, rte_memory_order_release);
#endif
} else {
rte_io_wmb();
@@ -528,8 +528,8 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
- used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
- __ATOMIC_RELAXED); \
+ used_idx = rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, \
+ rte_memory_order_relaxed); \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
if (virtio_with_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
@@ -546,7 +546,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
" avail.flags=0x%x; used.flags=0x%x", \
(vq)->vq_nentries, (vq)->vq_free_cnt, nused, (vq)->vq_desc_head_idx, \
(vq)->vq_split.ring.avail->idx, (vq)->vq_used_cons_idx, \
- __atomic_load_n(&(vq)->vq_split.ring.used->idx, __ATOMIC_RELAXED), \
+ rte_atomic_load_explicit(&(vq)->vq_split.ring.used->idx, rte_memory_order_relaxed), \
(vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
} while (0)
#else
--
1.8.3.1
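The virtio changes above only touch the spelling of the fences and ring-index accesses; the weak-barrier selection itself is unchanged. A reduced sketch of that selection, assuming <rte_stdatomic.h> plus <rte_atomic.h> for rte_io_wmb(), could look like:

#include <stdint.h>
#include <rte_atomic.h>		/* rte_io_wmb() */
#include <rte_stdatomic.h>

/* Order descriptor writes before the index update: a C11 release fence
 * is enough for the weak-barrier case, otherwise the stronger I/O
 * write barrier is used, mirroring virtio_wmb() in the patch above. */
static inline void
demo_virtio_wmb(uint8_t weak_barriers)
{
	if (weak_barriers)
		rte_atomic_thread_fence(rte_memory_order_release);
	else
		rte_io_wmb();
}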
* [PATCH v6 17/45] net/hinic: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (15 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 16/45] net/virtio: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 18/45] net/idpf: " Tyler Retzlaff
` (28 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/hinic/hinic_pmd_rx.c | 2 +-
drivers/net/hinic/hinic_pmd_rx.h | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 7adb6e3..c2cd295 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -1004,7 +1004,7 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
while (pkts < nb_pkts) {
/* 2. current ci is done */
rx_cqe = &rxq->rx_cqe[sw_ci];
- status = __atomic_load_n(&rx_cqe->status, __ATOMIC_ACQUIRE);
+ status = rte_atomic_load_explicit(&rx_cqe->status, rte_memory_order_acquire);
if (!HINIC_GET_RX_DONE_BE(status))
break;
diff --git a/drivers/net/hinic/hinic_pmd_rx.h b/drivers/net/hinic/hinic_pmd_rx.h
index 2dde3ec..43c236b 100644
--- a/drivers/net/hinic/hinic_pmd_rx.h
+++ b/drivers/net/hinic/hinic_pmd_rx.h
@@ -33,7 +33,7 @@ struct __rte_cache_aligned hinic_rq_cqe {
#else
struct hinic_rq_cqe {
#endif
- u32 status;
+ RTE_ATOMIC(u32) status;
u32 vlan_len;
u32 offload_type;
u32 rss_hash;
--
1.8.3.1
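The hinic change above is the usual completion-polling idiom: the status word is read with acquire semantics so that, once the DONE bit is seen, the fields written before it are also visible. A reduced sketch with a hypothetical CQE layout could look like:

#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_cqe {
	RTE_ATOMIC(uint32_t) status;	/* written last by the producer */
	uint32_t pkt_len;
};

/* Returns 1 and fills *len once the completion is ready. */
static int
demo_poll_cqe(struct demo_cqe *cqe, uint32_t done_bit, uint32_t *len)
{
	uint32_t status = rte_atomic_load_explicit(&cqe->status,
						   rte_memory_order_acquire);

	if (!(status & done_bit))
		return 0;
	*len = cqe->pkt_len;
	return 1;
}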
* [PATCH v6 18/45] net/idpf: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (16 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 17/45] net/hinic: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 19/45] net/qede: " Tyler Retzlaff
` (27 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/idpf/idpf_ethdev.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 86151c9..1df4d6b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -259,8 +259,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
- __ATOMIC_RELAXED);
+ mbuf_alloc_failed += rte_atomic_load_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ rte_memory_order_relaxed);
}
return mbuf_alloc_failed;
@@ -308,7 +308,8 @@ struct rte_idpf_xstats_name_off {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&rxq->rx_stats.mbuf_alloc_failed, 0,
+ rte_memory_order_relaxed);
}
}
--
1.8.3.1
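In the idpf hunks above the atomics only protect a statistics counter, so relaxed ordering is sufficient for the datapath increment, the aggregation and the reset. A reduced sketch that folds the last two into one helper, with a hypothetical queue type, could look like:

#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_rxq {
	RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};

/* Sum the per-queue counters and clear them; the counter is not used
 * to order any other data, so every access can stay relaxed. */
static uint64_t
demo_stats_collect(struct demo_rxq *rxq, unsigned int nb_queues)
{
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < nb_queues; i++) {
		total += rte_atomic_load_explicit(&rxq[i].mbuf_alloc_failed,
						  rte_memory_order_relaxed);
		rte_atomic_store_explicit(&rxq[i].mbuf_alloc_failed, 0,
					  rte_memory_order_relaxed);
	}
	return total;
}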
* [PATCH v6 19/45] net/qede: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (17 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 18/45] net/idpf: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 20/45] net/ring: " Tyler Retzlaff
` (26 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/qede/base/bcm_osal.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2edeb38..abd1186 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -51,11 +51,11 @@ void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
/* Counter to track current memzone allocated */
static uint16_t ecore_mz_count;
-static uint32_t ref_cnt;
+static RTE_ATOMIC(uint32_t) ref_cnt;
int ecore_mz_mapping_alloc(void)
{
- if (__atomic_fetch_add(&ref_cnt, 1, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_fetch_add_explicit(&ref_cnt, 1, rte_memory_order_relaxed) == 0) {
ecore_mz_mapping = rte_calloc("ecore_mz_map",
rte_memzone_max_get(), sizeof(struct rte_memzone *), 0);
}
@@ -68,7 +68,7 @@ int ecore_mz_mapping_alloc(void)
void ecore_mz_mapping_free(void)
{
- if (__atomic_fetch_sub(&ref_cnt, 1, __ATOMIC_RELAXED) - 1 == 0) {
+ if (rte_atomic_fetch_sub_explicit(&ref_cnt, 1, rte_memory_order_relaxed) - 1 == 0) {
rte_free(ecore_mz_mapping);
ecore_mz_mapping = NULL;
}
--
1.8.3.1
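The qede counter above gives first-caller-allocates, last-caller-frees semantics: fetch-add and fetch-sub return the previous value, so 0 marks the first reference and 1 the last (the driver spells the second test as "- 1 == 0", which is the same condition). A reduced sketch with a hypothetical resource could look like:

#include <stdint.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) demo_refcnt;

static void
demo_resource_get(void)
{
	/* Previous value 0: this call took the first reference. */
	if (rte_atomic_fetch_add_explicit(&demo_refcnt, 1,
			rte_memory_order_relaxed) == 0) {
		/* ... allocate the shared resource ... */
	}
}

static void
demo_resource_put(void)
{
	/* Previous value 1: this call dropped the last reference. */
	if (rte_atomic_fetch_sub_explicit(&demo_refcnt, 1,
			rte_memory_order_relaxed) == 1) {
		/* ... free the shared resource ... */
	}
}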
* [PATCH v6 20/45] net/ring: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (18 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 19/45] net/qede: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 21/45] vdpa/mlx5: " Tyler Retzlaff
` (25 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/ring/rte_eth_ring.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 48953dd..b16f5d5 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -44,8 +44,8 @@ enum dev_action {
struct ring_queue {
struct rte_ring *rng;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_internals {
@@ -82,7 +82,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts += nb_rx;
else
- __atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->rx_pkts, nb_rx, rte_memory_order_relaxed);
return nb_rx;
}
@@ -96,7 +96,7 @@ struct pmd_internals {
if (r->rng->flags & RING_F_SP_ENQ)
r->tx_pkts += nb_tx;
else
- __atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&r->tx_pkts, nb_tx, rte_memory_order_relaxed);
return nb_tx;
}
--
1.8.3.1
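The ring PMD hunks above keep the existing split between exclusive and shared queues: a single producer can bump the packet counter directly, while shared queues need an atomic read-modify-write, and relaxed ordering is enough either way because it is only a statistic. A reduced sketch with a hypothetical queue type could look like:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_queue {
	bool shared;			/* true if several threads update it */
	RTE_ATOMIC(uint64_t) pkts;
};

static void
demo_count_pkts(struct demo_queue *q, uint64_t n)
{
	if (!q->shared)
		q->pkts += n;		/* only one thread updates it */
	else
		rte_atomic_fetch_add_explicit(&q->pkts, n,
					      rte_memory_order_relaxed);
}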
* [PATCH v6 21/45] vdpa/mlx5: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (19 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 20/45] net/ring: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 22/45] raw/ifpga: " Tyler Retzlaff
` (24 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/vdpa/mlx5/mlx5_vdpa.c | 24 +++++++++---------
drivers/vdpa/mlx5/mlx5_vdpa.h | 14 +++++------
drivers/vdpa/mlx5/mlx5_vdpa_cthread.c | 46 +++++++++++++++++------------------
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 4 ++-
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 ++-
6 files changed, 52 insertions(+), 44 deletions(-)
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index f900384..98c39a5 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -261,8 +261,8 @@
uint32_t timeout = 0;
/* Check and wait all close tasks done. */
- while (__atomic_load_n(&priv->dev_close_progress,
- __ATOMIC_RELAXED) != 0 && timeout < 1000) {
+ while (rte_atomic_load_explicit(&priv->dev_close_progress,
+ rte_memory_order_relaxed) != 0 && timeout < 1000) {
rte_delay_us_sleep(10000);
timeout++;
}
@@ -294,8 +294,8 @@
priv->last_c_thrd_idx = 0;
else
priv->last_c_thrd_idx++;
- __atomic_store_n(&priv->dev_close_progress,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress,
+ 1, rte_memory_order_relaxed);
if (mlx5_vdpa_task_add(priv,
priv->last_c_thrd_idx,
MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
@@ -319,8 +319,8 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&priv->dev_close_progress, 0,
+ rte_memory_order_relaxed);
priv->state = MLX5_VDPA_STATE_PROBED;
DRV_LOG(INFO, "vDPA device %d was closed.", vid);
return ret;
@@ -664,7 +664,9 @@
static int
mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t max_queues, index, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
@@ -847,8 +849,8 @@
if (conf_thread_mng.initializer_priv == priv)
if (mlx5_vdpa_mult_threads_create())
goto error;
- __atomic_fetch_add(&conf_thread_mng.refcnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&conf_thread_mng.refcnt, 1,
+ rte_memory_order_relaxed);
}
if (mlx5_vdpa_create_dev_resources(priv))
goto error;
@@ -937,8 +939,8 @@
if (priv->vdev)
rte_vdpa_unregister_device(priv->vdev);
if (priv->use_c_thread)
- if (__atomic_fetch_sub(&conf_thread_mng.refcnt,
- 1, __ATOMIC_RELAXED) == 1)
+ if (rte_atomic_fetch_sub_explicit(&conf_thread_mng.refcnt,
+ 1, rte_memory_order_relaxed) == 1)
mlx5_vdpa_mult_threads_destroy(true);
rte_free(priv);
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 4ce6977..e156520 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -93,8 +93,8 @@ enum mlx5_vdpa_task_type {
struct __rte_aligned(4) mlx5_vdpa_task {
struct mlx5_vdpa_priv *priv;
enum mlx5_vdpa_task_type type;
- uint32_t *remaining_cnt;
- uint32_t *err_cnt;
+ RTE_ATOMIC(uint32_t) *remaining_cnt;
+ RTE_ATOMIC(uint32_t) *err_cnt;
uint32_t idx;
} __rte_packed;
@@ -107,7 +107,7 @@ struct mlx5_vdpa_c_thread {
struct mlx5_vdpa_conf_thread_mng {
void *initializer_priv;
- uint32_t refcnt;
+ RTE_ATOMIC(uint32_t) refcnt;
uint32_t max_thrds;
pthread_mutex_t cthrd_lock;
struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
@@ -212,7 +212,7 @@ struct mlx5_vdpa_priv {
uint64_t features; /* Negotiated features. */
uint16_t log_max_rqt_size;
uint16_t last_c_thrd_idx;
- uint16_t dev_close_progress;
+ RTE_ATOMIC(uint16_t) dev_close_progress;
uint16_t num_mrs; /* Number of memory regions. */
struct mlx5_vdpa_steer steer;
struct mlx5dv_var *var;
@@ -581,13 +581,13 @@ int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num);
int
mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time);
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time);
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
void
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
index 68ed841..84f611c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_cthread.c
@@ -48,7 +48,7 @@
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
uint32_t thrd_idx,
enum mlx5_vdpa_task_type task_type,
- uint32_t *remaining_cnt, uint32_t *err_cnt,
+ RTE_ATOMIC(uint32_t) *remaining_cnt, RTE_ATOMIC(uint32_t) *err_cnt,
void **task_data, uint32_t num)
{
struct rte_ring *rng = conf_thread_mng.cthrd[thrd_idx].rng;
@@ -70,8 +70,8 @@
return -1;
for (i = 0 ; i < num; i++)
if (task[i].remaining_cnt)
- __atomic_fetch_add(task[i].remaining_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task[i].remaining_cnt, 1,
+ rte_memory_order_relaxed);
/* wake up conf thread. */
pthread_mutex_lock(&conf_thread_mng.cthrd_lock);
pthread_cond_signal(&conf_thread_mng.cthrd[thrd_idx].c_cond);
@@ -80,16 +80,16 @@
}
bool
-mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
- uint32_t *err_cnt, uint32_t sleep_time)
+mlx5_vdpa_c_thread_wait_bulk_tasks_done(RTE_ATOMIC(uint32_t) *remaining_cnt,
+ RTE_ATOMIC(uint32_t) *err_cnt, uint32_t sleep_time)
{
/* Check and wait all tasks done. */
- while (__atomic_load_n(remaining_cnt,
- __ATOMIC_RELAXED) != 0) {
+ while (rte_atomic_load_explicit(remaining_cnt,
+ rte_memory_order_relaxed) != 0) {
rte_delay_us_sleep(sleep_time);
}
- if (__atomic_load_n(err_cnt,
- __ATOMIC_RELAXED)) {
+ if (rte_atomic_load_explicit(err_cnt,
+ rte_memory_order_relaxed)) {
DRV_LOG(ERR, "Tasks done with error.");
return true;
}
@@ -137,8 +137,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to register mr %d.", task.idx);
- __atomic_fetch_add(task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(task.err_cnt, 1,
+ rte_memory_order_relaxed);
}
break;
case MLX5_VDPA_TASK_SETUP_VIRTQ:
@@ -149,8 +149,8 @@
if (ret) {
DRV_LOG(ERR,
"Failed to setup virtq %d.", task.idx);
- __atomic_fetch_add(
- task.err_cnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ task.err_cnt, 1, rte_memory_order_relaxed);
}
virtq->enable = 1;
pthread_mutex_unlock(&virtq->virtq_lock);
@@ -164,9 +164,9 @@
DRV_LOG(ERR,
"Failed to stop virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -176,9 +176,9 @@
DRV_LOG(ERR,
"Failed to get negotiated features virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
pthread_mutex_unlock(&virtq->virtq_lock);
break;
}
@@ -200,9 +200,9 @@
if (!priv->connected)
mlx5_vdpa_dev_cache_clean(priv);
priv->vid = 0;
- __atomic_store_n(
+ rte_atomic_store_explicit(
&priv->dev_close_progress, 0,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
break;
case MLX5_VDPA_TASK_PREPARE_VIRTQ:
ret = mlx5_vdpa_virtq_single_resource_prepare(
@@ -211,9 +211,9 @@
DRV_LOG(ERR,
"Failed to prepare virtq %d.",
task.idx);
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
task.err_cnt, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
break;
default:
@@ -222,8 +222,8 @@
break;
}
if (task.remaining_cnt)
- __atomic_fetch_sub(task.remaining_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(task.remaining_cnt,
+ 1, rte_memory_order_relaxed);
}
return 0;
}
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 0fa671f..a207734 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -92,7 +92,9 @@
int
mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
{
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
uint64_t features;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index e333f0b..4dfe800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -279,7 +279,9 @@
uint8_t mode = 0;
int ret = -rte_errno;
uint32_t i, thrd_idx, data[1];
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
struct rte_vhost_memory *mem = mlx5_vdpa_vhost_mem_regions_prepare
(priv->vid, &mode, &priv->vmem_info.size,
&priv->vmem_info.gcd, &priv->vmem_info.entries_num);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 607e290..093cdd0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -666,7 +666,9 @@
{
int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
- uint32_t remaining_cnt = 0, err_cnt = 0, task_num = 0;
+ RTE_ATOMIC(uint32_t) remaining_cnt = 0;
+ RTE_ATOMIC(uint32_t) err_cnt = 0;
+ uint32_t task_num = 0;
uint32_t i, thrd_idx, data[1];
struct mlx5_vdpa_virtq *virtq;
struct rte_vhost_vring vq;
--
1.8.3.1
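Much of the vdpa/mlx5 diff above is type plumbing: once the task counters are declared with RTE_ATOMIC(), the same qualifier has to follow them through every pointer parameter, which is why the prototypes change as well. A reduced sketch of the submit-side wait and the worker-side completion, with hypothetical helpers and assuming rte_delay_us_sleep() from <rte_cycles.h>, could look like:

#include <stdbool.h>
#include <stdint.h>
#include <rte_cycles.h>
#include <rte_stdatomic.h>

/* Worker side: report one finished task. */
static void
demo_task_done(RTE_ATOMIC(uint32_t) *remaining, RTE_ATOMIC(uint32_t) *err, bool failed)
{
	if (failed)
		rte_atomic_fetch_add_explicit(err, 1, rte_memory_order_relaxed);
	rte_atomic_fetch_sub_explicit(remaining, 1, rte_memory_order_relaxed);
}

/* Submitter side: poll until every task has reported back. */
static int
demo_wait_tasks(RTE_ATOMIC(uint32_t) *remaining, RTE_ATOMIC(uint32_t) *err)
{
	while (rte_atomic_load_explicit(remaining, rte_memory_order_relaxed) != 0)
		rte_delay_us_sleep(100);
	return rte_atomic_load_explicit(err, rte_memory_order_relaxed) != 0 ? -1 : 0;
}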
* [PATCH v6 22/45] raw/ifpga: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (20 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 21/45] vdpa/mlx5: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 23/45] event/opdl: " Tyler Retzlaff
` (23 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/raw/ifpga/ifpga_rawdev.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/raw/ifpga/ifpga_rawdev.c b/drivers/raw/ifpga/ifpga_rawdev.c
index f89bd3f..78d3c88 100644
--- a/drivers/raw/ifpga/ifpga_rawdev.c
+++ b/drivers/raw/ifpga/ifpga_rawdev.c
@@ -73,7 +73,7 @@
static struct ifpga_rawdev ifpga_rawdevices[IFPGA_RAWDEV_NUM];
-static int ifpga_monitor_refcnt;
+static RTE_ATOMIC(int) ifpga_monitor_refcnt;
static rte_thread_t ifpga_monitor_start_thread;
static struct ifpga_rawdev *
@@ -512,7 +512,7 @@ static int set_surprise_link_check_aer(
int gsd_enable, ret;
#define MS 1000
- while (__atomic_load_n(&ifpga_monitor_refcnt, __ATOMIC_RELAXED)) {
+ while (rte_atomic_load_explicit(&ifpga_monitor_refcnt, rte_memory_order_relaxed)) {
gsd_enable = 0;
for (i = 0; i < IFPGA_RAWDEV_NUM; i++) {
ifpga_rdev = &ifpga_rawdevices[i];
@@ -549,7 +549,7 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 1;
- if (!__atomic_fetch_add(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED)) {
+ if (!rte_atomic_fetch_add_explicit(&ifpga_monitor_refcnt, 1, rte_memory_order_relaxed)) {
ret = rte_thread_create_internal_control(&ifpga_monitor_start_thread,
"ifpga-mon", ifpga_rawdev_gsd_handle, NULL);
if (ret != 0) {
@@ -573,7 +573,8 @@ static int set_surprise_link_check_aer(
dev->poll_enabled = 0;
- if (!(__atomic_fetch_sub(&ifpga_monitor_refcnt, 1, __ATOMIC_RELAXED) - 1) &&
+ if (!(rte_atomic_fetch_sub_explicit(&ifpga_monitor_refcnt, 1,
+ rte_memory_order_relaxed) - 1) &&
ifpga_monitor_start_thread.opaque_id != 0) {
ret = pthread_cancel((pthread_t)ifpga_monitor_start_thread.opaque_id);
if (ret)
--
1.8.3.1
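The ifpga monitor above relies on the same previous-value test: the first registration starts the background thread, and the polling loop keeps running while any device still holds a reference. A reduced sketch with hypothetical names could look like:

#include <rte_stdatomic.h>

static RTE_ATOMIC(int) demo_monitor_refcnt;

/* Loop condition of the monitor thread. */
static int
demo_monitor_should_run(void)
{
	return rte_atomic_load_explicit(&demo_monitor_refcnt,
					rte_memory_order_relaxed) != 0;
}

/* Returns non-zero when the caller took the first reference and is the
 * one that should spawn the monitor thread. */
static int
demo_monitor_register(void)
{
	return rte_atomic_fetch_add_explicit(&demo_monitor_refcnt, 1,
					     rte_memory_order_relaxed) == 0;
}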
* [PATCH v6 23/45] event/opdl: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (21 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 22/45] raw/ifpga: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 24/45] event/octeontx: " Tyler Retzlaff
` (22 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/opdl/opdl_ring.c | 80 +++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index e87ffd5..3476f6b 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -47,12 +47,12 @@ struct __rte_cache_aligned shared_state {
/* Last known minimum sequence number of dependencies, used for multi
* thread operation
*/
- uint32_t available_seq;
+ RTE_ATOMIC(uint32_t) available_seq;
char _pad1[RTE_CACHE_LINE_SIZE * 3];
- uint32_t head; /* Head sequence number (for multi thread operation) */
+ RTE_ATOMIC(uint32_t) head; /* Head sequence number (for multi thread operation) */
char _pad2[RTE_CACHE_LINE_SIZE * 3];
struct opdl_stage *stage; /* back pointer */
- uint32_t tail; /* Tail sequence number */
+ RTE_ATOMIC(uint32_t) tail; /* Tail sequence number */
char _pad3[RTE_CACHE_LINE_SIZE * 2];
};
@@ -149,10 +149,10 @@ struct opdl_ring {
available(const struct opdl_stage *s)
{
if (s->threadsafe == true) {
- uint32_t n = __atomic_load_n(&s->shared.available_seq,
- __ATOMIC_ACQUIRE) -
- __atomic_load_n(&s->shared.head,
- __ATOMIC_ACQUIRE);
+ uint32_t n = rte_atomic_load_explicit(&s->shared.available_seq,
+ rte_memory_order_acquire) -
+ rte_atomic_load_explicit(&s->shared.head,
+ rte_memory_order_acquire);
/* Return 0 if available_seq needs to be updated */
return (n <= s->num_slots) ? n : 0;
@@ -168,7 +168,7 @@ struct opdl_ring {
{
uint32_t i;
uint32_t this_tail = s->shared.tail;
- uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+ uint32_t min_seq = rte_atomic_load_explicit(&s->deps[0]->tail, rte_memory_order_acquire);
/* Input stage sequence numbers are greater than the sequence numbers of
* its dependencies so an offset of t->num_slots is needed when
* calculating available slots and also the condition which is used to
@@ -179,16 +179,16 @@ struct opdl_ring {
if (is_input_stage(s)) {
wrap = s->num_slots;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((this_tail - seq) > (this_tail - min_seq))
min_seq = seq;
}
} else {
wrap = 0;
for (i = 1; i < s->num_deps; i++) {
- uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
- __ATOMIC_ACQUIRE);
+ uint32_t seq = rte_atomic_load_explicit(&s->deps[i]->tail,
+ rte_memory_order_acquire);
if ((seq - this_tail) < (min_seq - this_tail))
min_seq = seq;
}
@@ -197,8 +197,8 @@ struct opdl_ring {
if (s->threadsafe == false)
s->available_seq = min_seq + wrap;
else
- __atomic_store_n(&s->shared.available_seq, min_seq + wrap,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.available_seq, min_seq + wrap,
+ rte_memory_order_release);
}
/* Wait until the number of available slots reaches number requested */
@@ -298,7 +298,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -381,18 +381,18 @@ struct opdl_ring {
/* There should be no race condition here. If shared.tail
* matches, no other core can update it until this one does.
*/
- if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+ if (rte_atomic_load_explicit(&s->shared.tail, rte_memory_order_acquire) ==
tail) {
if (num_entries >= (head - tail)) {
claim_mgr_remove(disclaims);
- __atomic_store_n(&s->shared.tail, head,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, head,
+ rte_memory_order_release);
num_entries -= (head - tail);
} else {
claim_mgr_move_tail(disclaims, num_entries);
- __atomic_store_n(&s->shared.tail,
+ rte_atomic_store_explicit(&s->shared.tail,
num_entries + tail,
- __ATOMIC_RELEASE);
+ rte_memory_order_release);
num_entries = 0;
}
} else if (block == false)
@@ -420,7 +420,7 @@ struct opdl_ring {
opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
false);
- *old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+ *old_head = rte_atomic_load_explicit(&s->shared.head, rte_memory_order_acquire);
while (true) {
bool success;
/* If called by opdl_ring_input(), claim does not need to be
@@ -440,11 +440,10 @@ struct opdl_ring {
if (*num_entries == 0)
return;
- success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+ success = rte_atomic_compare_exchange_weak_explicit(&s->shared.head, old_head,
*old_head + *num_entries,
- true, /* may fail spuriously */
- __ATOMIC_RELEASE, /* memory order on success */
- __ATOMIC_ACQUIRE); /* memory order on fail */
+ rte_memory_order_release, /* memory order on success */
+ rte_memory_order_acquire); /* memory order on fail */
if (likely(success))
break;
rte_pause();
@@ -472,10 +471,11 @@ struct opdl_ring {
/* If another thread started inputting before this one, but hasn't
* finished, we need to wait for it to complete to update the tail.
*/
- rte_wait_until_equal_32(&s->shared.tail, old_head, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&s->shared.tail, old_head,
+ rte_memory_order_acquire);
- __atomic_store_n(&s->shared.tail, old_head + num_entries,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, old_head + num_entries,
+ rte_memory_order_release);
return num_entries;
}
@@ -525,8 +525,8 @@ struct opdl_ring {
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- event = __atomic_load_n(&(ev->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -627,8 +627,8 @@ struct opdl_ring {
num_entries, s->head - old_tail);
num_entries = s->head - old_tail;
}
- __atomic_store_n(&s->shared.tail, num_entries + old_tail,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, num_entries + old_tail,
+ rte_memory_order_release);
}
uint32_t
@@ -657,7 +657,7 @@ struct opdl_ring {
copy_entries_in(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
@@ -676,7 +676,7 @@ struct opdl_ring {
copy_entries_out(t, head, entries, num_entries);
s->head += num_entries;
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
return num_entries;
}
@@ -755,7 +755,7 @@ struct opdl_ring {
return 0;
}
if (s->threadsafe == false) {
- __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&s->shared.tail, s->head, rte_memory_order_release);
s->seq += s->num_claimed;
s->shadow_head = s->head;
s->num_claimed = 0;
@@ -1008,8 +1008,8 @@ struct opdl_ring *
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- event = __atomic_load_n(&(ev_orig->event),
- __ATOMIC_ACQUIRE);
+ event = rte_atomic_load_explicit((uint64_t __rte_atomic *)&ev_orig->event,
+ rte_memory_order_acquire);
opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
flow_id = OPDL_FLOWID_MASK & event;
@@ -1026,9 +1026,9 @@ struct opdl_ring *
if ((event & OPDL_EVENT_MASK) !=
ev_temp) {
- __atomic_store_n(&(ev_orig->event),
- ev_update,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(
+ (uint64_t __rte_atomic *)&ev_orig->event,
+ ev_update, rte_memory_order_release);
ev_updated = true;
}
if (ev_orig->u64 != ev->u64) {
--
1.8.3.1
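The most interesting hunk in the opdl patch above is the claim loop: the weak compare-exchange no longer takes a separate "may fail spuriously" flag, the surrounding retry loop carries that meaning instead. A reduced sketch of a multi-thread head claim, with hypothetical names, could look like:

#include <stdint.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) demo_head;

/* Claim 'num' slots and return the first claimed sequence number. */
static uint32_t
demo_claim(uint32_t num)
{
	uint32_t old = rte_atomic_load_explicit(&demo_head, rte_memory_order_acquire);

	/* A weak CAS may fail spuriously, which is fine in a retry loop;
	 * on failure 'old' is refreshed with the current head value. */
	while (!rte_atomic_compare_exchange_weak_explicit(&demo_head, &old, old + num,
			rte_memory_order_release, rte_memory_order_acquire))
		rte_pause();
	return old;
}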
* [PATCH v6 24/45] event/octeontx: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (22 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 23/45] event/opdl: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 25/45] event/dsw: " Tyler Retzlaff
` (21 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/octeontx/timvf_evdev.h | 8 ++++----
drivers/event/octeontx/timvf_worker.h | 36 +++++++++++++++++------------------
2 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index e7a63e4..3a2dc47 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -126,15 +126,15 @@ enum timvf_clk_src {
struct __rte_aligned(8) tim_mem_bucket {
uint64_t first_chunk;
union {
- uint64_t w1;
+ RTE_ATOMIC(uint64_t) w1;
struct {
- uint32_t nb_entry;
+ RTE_ATOMIC(uint32_t) nb_entry;
uint8_t sbt:1;
uint8_t hbt:1;
uint8_t bsk:1;
uint8_t rsvd:5;
- uint8_t lock;
- int16_t chunk_remainder;
+ RTE_ATOMIC(uint8_t) lock;
+ RTE_ATOMIC(int16_t) chunk_remainder;
};
};
uint64_t current_chunk;
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index e4b923e..de9f1b0 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -19,22 +19,22 @@
static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
- return __atomic_load_n(&bktp->chunk_remainder,
- __ATOMIC_ACQUIRE);
+ return rte_atomic_load_explicit(&bktp->chunk_remainder,
+ rte_memory_order_acquire);
}
static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_store_n(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
- __atomic_fetch_sub(&bktp->chunk_remainder, v,
- __ATOMIC_RELEASE);
+ rte_atomic_fetch_sub_explicit(&bktp->chunk_remainder, v,
+ rte_memory_order_release);
}
static inline uint8_t
@@ -47,14 +47,14 @@
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
- return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_or_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint8_t
@@ -81,34 +81,34 @@
{
/*Clear everything except lock. */
const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
- __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ rte_memory_order_acq_rel);
}
static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
- return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
- __ATOMIC_RELAXED);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, TIM_BUCKET_SEMA,
+ rte_memory_order_relaxed);
}
static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
- return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+ return rte_atomic_fetch_add_explicit(&bktp->w1, v, rte_memory_order_acq_rel);
}
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit(&bktp->lock, 0xff, rte_memory_order_acq_rel);
}
static inline uint32_t
@@ -121,13 +121,13 @@
static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
- __atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, 1, rte_memory_order_relaxed);
}
static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
- __atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&bktp->nb_entry, v, rte_memory_order_relaxed);
}
static inline uint64_t
@@ -135,7 +135,7 @@
{
const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
TIM_BUCKET_W1_S_NUM_ENTRIES);
- return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL) & v;
+ return rte_atomic_fetch_and_explicit(&bktp->w1, v, rte_memory_order_acq_rel) & v;
}
static inline struct tim_mem_entry *
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
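The timvf change above is largely a typing change: the fields touched by atomic operations gain the RTE_ATOMIC() specifier so the rte_atomic_*_explicit() macros type-check when stdatomic is enabled. A rough sketch of the same shape, with a hypothetical control word and bit layout (demo_word, DEMO_FLAG_BUSY), not the real bucket layout:

#include <stdint.h>
#include <rte_stdatomic.h>

/* Hypothetical flag in the upper half of the word (bit 0 of 'flags'
 * on a little-endian layout).
 */
#define DEMO_FLAG_BUSY (UINT64_C(1) << 32)

union demo_word {
        RTE_ATOMIC(uint64_t) w;        /* whole-word read-modify-writes */
        struct {
                RTE_ATOMIC(uint32_t) nb_entry;
                uint32_t flags;
        };
};

static inline uint64_t
demo_set_busy(union demo_word *d)
{
        /* acq_rel keeps the flag update ordered with surrounding accesses. */
        return rte_atomic_fetch_or_explicit(&d->w, DEMO_FLAG_BUSY,
                                            rte_memory_order_acq_rel);
}

static inline void
demo_inc_nent(union demo_word *d)
{
        /* Pure counter update: relaxed ordering is sufficient. */
        rte_atomic_fetch_add_explicit(&d->nb_entry, 1,
                                      rte_memory_order_relaxed);
}

The acq_rel read-modify-writes on the whole word order the flag changes against the rest of the bucket state, while the entry counter only needs relaxed ordering, which mirrors the split visible in the diff.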
* [PATCH v6 25/45] event/dsw: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (23 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 24/45] event/octeontx: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 26/45] dma/skeleton: " Tyler Retzlaff
` (20 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
drivers/event/dsw/dsw_evdev.h | 6 +++---
drivers/event/dsw/dsw_event.c | 47 +++++++++++++++++++++++++++---------------
drivers/event/dsw/dsw_xstats.c | 4 ++--
3 files changed, 35 insertions(+), 22 deletions(-)
diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index 3a5989f..2018306 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -227,9 +227,9 @@ struct __rte_cache_aligned dsw_port {
alignas(RTE_CACHE_LINE_SIZE) struct rte_ring *ctl_in_ring;
/* Estimate of current port load. */
- alignas(RTE_CACHE_LINE_SIZE) int16_t load;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int16_t) load;
/* Estimate of flows currently migrating to this port. */
- alignas(RTE_CACHE_LINE_SIZE) int32_t immigration_load;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int32_t) immigration_load;
};
struct dsw_queue {
@@ -252,7 +252,7 @@ struct dsw_evdev {
uint8_t num_queues;
int32_t max_inflight;
- alignas(RTE_CACHE_LINE_SIZE) int32_t credits_on_loan;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(int32_t) credits_on_loan;
};
#define DSW_CTL_PAUS_REQ (0)
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 23488d9..70c3c3a 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -33,7 +33,8 @@
}
total_on_loan =
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed);
available = dsw->max_inflight - total_on_loan;
acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
@@ -45,13 +46,16 @@
* allocation.
*/
new_total_on_loan =
- __atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED) + acquired_credits;
+ rte_atomic_fetch_add_explicit(&dsw->credits_on_loan,
+ acquired_credits,
+ rte_memory_order_relaxed) +
+ acquired_credits;
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
- __atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan,
+ acquired_credits,
+ rte_memory_order_relaxed);
return false;
}
@@ -77,8 +81,9 @@
port->inflight_credits = leave_credits;
- __atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan,
+ return_credits,
+ rte_memory_order_relaxed);
DSW_LOG_DP_PORT(DEBUG, port->id,
"Returned %d tokens to pool.\n",
@@ -156,19 +161,22 @@
int16_t period_load;
int16_t new_load;
- old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
+ old_load = rte_atomic_load_explicit(&port->load,
+ rte_memory_order_relaxed);
period_load = dsw_port_load_close_period(port, now);
new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
(DSW_OLD_LOAD_WEIGHT+1);
- __atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->load, new_load,
+ rte_memory_order_relaxed);
/* The load of the recently immigrated flows should hopefully
* be reflected the load estimate by now.
*/
- __atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&port->immigration_load, 0,
+ rte_memory_order_relaxed);
}
static void
@@ -390,10 +398,11 @@ struct dsw_queue_flow_burst {
for (i = 0; i < dsw->num_ports; i++) {
int16_t measured_load =
- __atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].load,
+ rte_memory_order_relaxed);
int32_t immigration_load =
- __atomic_load_n(&dsw->ports[i].immigration_load,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
+ rte_memory_order_relaxed);
int32_t load = measured_load + immigration_load;
load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -523,8 +532,10 @@ struct dsw_queue_flow_burst {
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
- __atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
- candidate_flow_load, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(
+ &dsw->ports[candidate_port_id].immigration_load,
+ candidate_flow_load,
+ rte_memory_order_relaxed);
return true;
}
@@ -882,7 +893,8 @@ struct dsw_queue_flow_burst {
}
source_port_load =
- __atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&source_port->load,
+ rte_memory_order_relaxed);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Load %d is below threshold level %d.\n",
@@ -1301,7 +1313,8 @@ struct dsw_queue_flow_burst {
* above the water mark.
*/
if (unlikely(num_new > 0 &&
- __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
+ rte_atomic_load_explicit(&dsw->credits_on_loan,
+ rte_memory_order_relaxed) >
source_port->new_event_threshold))
return 0;
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index 2a83a28..f61dfd8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -48,7 +48,7 @@ struct dsw_xstats_port {
static uint64_t
dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
{
- return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
}
static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ struct dsw_xstats_port {
{
int16_t load;
- load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
+ load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
return DSW_LOAD_TO_PERCENT(load);
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
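The dsw credit handling above follows an optimistic reserve-then-undo pattern on a relaxed counter. A minimal sketch under that assumption, with hypothetical names (demo_pool, demo_acquire_credits) instead of the dsw structures:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_pool {
        int32_t max_inflight;
        RTE_ATOMIC(int32_t) credits_on_loan;
};

static inline bool
demo_acquire_credits(struct demo_pool *p, int32_t want)
{
        int32_t new_total;

        new_total = rte_atomic_fetch_add_explicit(&p->credits_on_loan, want,
                                                  rte_memory_order_relaxed) +
                    want;
        if (new_total > p->max_inflight) {
                /* Another port took the last credits: undo the reservation. */
                rte_atomic_fetch_sub_explicit(&p->credits_on_loan, want,
                                              rte_memory_order_relaxed);
                return false;
        }
        return true;
}

Relaxed ordering is sufficient because the counter only tracks a quantity and publishes no other data; the roll-back keeps the total consistent when two ports race past the limit.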
* [PATCH v6 26/45] dma/skeleton: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (24 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 25/45] event/dsw: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 27/45] crypto/octeontx: " Tyler Retzlaff
` (19 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/dma/skeleton/skeleton_dmadev.c | 5 +++--
drivers/dma/skeleton/skeleton_dmadev.h | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index 48f88f9..926c188 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -142,7 +142,7 @@
else if (desc->op == SKELDMA_OP_FILL)
do_fill(desc);
- __atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(&hw->completed_count, 1, rte_memory_order_release);
(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
}
@@ -335,7 +335,8 @@
RTE_SET_USED(vchan);
*status = RTE_DMA_VCHAN_IDLE;
- if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE)
+ if (hw->submitted_count != rte_atomic_load_explicit(&hw->completed_count,
+ rte_memory_order_acquire)
|| hw->zero_req_count == 0)
*status = RTE_DMA_VCHAN_ACTIVE;
return 0;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index cfd37d1..0365f64 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -81,7 +81,7 @@ struct skeldma_hw {
/* Cache delimiter for cpuwork thread's operation data */
alignas(RTE_CACHE_LINE_SIZE) char cache2;
volatile uint32_t zero_req_count;
- uint64_t completed_count;
+ RTE_ATOMIC(uint64_t) completed_count;
};
#endif /* SKELETON_DMADEV_H */
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
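The skeleton dmadev change keeps a release increment on the worker side paired with an acquire load on the status-polling side. A minimal sketch of that pairing, with hypothetical names (demo_hw, demo_complete_one, demo_is_idle):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_hw {
        uint64_t submitted_count;              /* updated by submitter only */
        RTE_ATOMIC(uint64_t) completed_count;  /* updated by worker thread */
};

static inline void
demo_complete_one(struct demo_hw *hw)
{
        /* Release: results written before this are published with the count. */
        rte_atomic_fetch_add_explicit(&hw->completed_count, 1,
                                      rte_memory_order_release);
}

static inline bool
demo_is_idle(struct demo_hw *hw)
{
        return hw->submitted_count ==
               rte_atomic_load_explicit(&hw->completed_count,
                                        rte_memory_order_acquire);
}

The release/acquire pair is what makes the completed work visible to the caller that observes the updated count.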
* [PATCH v6 27/45] crypto/octeontx: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (25 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 26/45] dma/skeleton: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 28/45] common/mlx5: " Tyler Retzlaff
` (18 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/crypto/octeontx/otx_cryptodev_ops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 947e1be..bafd0c1 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -652,7 +652,7 @@
if (!rsp_info->sched_type)
ssows_head_wait(ws);
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
}
@@ -896,7 +896,7 @@
pcount = pending_queue_level(pqueue, DEFAULT_CMD_QLEN);
/* Ensure pcount isn't read before data lands */
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
count = (nb_ops > pcount) ? pcount : nb_ops;
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
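The crypto/octeontx change only swaps the fence spellings, keeping a release fence before work is handed off and an acquire fence before the received count is trusted. A stand-alone sketch of fence-based hand-off in that style follows; the variables (demo_request, demo_doorbell) are hypothetical placeholders, not driver state, and it assumes rte_atomic.h provides rte_atomic_thread_fence() with the rte_memory_order names from rte_stdatomic.h.

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

static uint64_t demo_request;              /* payload written by the producer */
static RTE_ATOMIC(uint32_t) demo_doorbell; /* signal observed by the consumer */

static inline void
demo_post_work(uint64_t req)
{
        demo_request = req;
        /* Make the request contents visible before ringing the doorbell. */
        rte_atomic_thread_fence(rte_memory_order_release);
        rte_atomic_store_explicit(&demo_doorbell, 1, rte_memory_order_relaxed);
}

static inline uint64_t
demo_take_work(void)
{
        while (rte_atomic_load_explicit(&demo_doorbell,
                                        rte_memory_order_relaxed) == 0)
                ;
        /* Do not read the request before the doorbell has been observed. */
        rte_atomic_thread_fence(rte_memory_order_acquire);
        return demo_request;
}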
* [PATCH v6 28/45] common/mlx5: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (26 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 27/45] crypto/octeontx: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 29/45] common/idpf: " Tyler Retzlaff
` (17 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/mlx5/linux/mlx5_nl.c | 5 +--
drivers/common/mlx5/mlx5_common.h | 2 +-
drivers/common/mlx5/mlx5_common_mr.c | 16 ++++-----
drivers/common/mlx5/mlx5_common_mr.h | 2 +-
drivers/common/mlx5/mlx5_common_utils.c | 32 +++++++++---------
drivers/common/mlx5/mlx5_common_utils.h | 6 ++--
drivers/common/mlx5/mlx5_malloc.c | 58 ++++++++++++++++-----------------
7 files changed, 61 insertions(+), 60 deletions(-)
diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 61192eb..a5ac4dc 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -175,10 +175,11 @@ struct mlx5_nl_port_info {
uint16_t state; /**< IB device port state (out). */
};
-uint32_t atomic_sn;
+RTE_ATOMIC(uint32_t) atomic_sn;
/* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
+#define MLX5_NL_SN_GENERATE (rte_atomic_fetch_add_explicit(&atomic_sn, 1, \
+ rte_memory_order_relaxed) + 1)
/**
* Opens a Netlink socket.
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 9c80277..14c70ed 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -195,7 +195,7 @@ enum mlx5_cqe_status {
/* Prevent speculative reading of other fields in CQE until
* CQE is valid.
*/
- rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ rte_atomic_thread_fence(rte_memory_order_acquire);
if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
op_code == MLX5_CQE_REQ_ERR))
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 85ec10d..50922ad 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -35,7 +35,7 @@ struct mlx5_range {
/** Memory region for a mempool. */
struct mlx5_mempool_mr {
struct mlx5_pmd_mr pmd_mr;
- uint32_t refcnt; /**< Number of mempools sharing this MR. */
+ RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
};
/* Mempool registration. */
@@ -56,11 +56,11 @@ struct mlx5_mempool_reg {
{
struct mlx5_mprq_buf *buf = opaque;
- if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+ if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
rte_mempool_put(buf->mp, buf);
- } else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0)) {
- __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+ } else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0)) {
+ rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
rte_mempool_put(buf->mp, buf);
}
}
@@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {
unsigned int i;
for (i = 0; i < mpr->mrs_n; i++)
- __atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
}
/**
@@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {
bool ret = false;
for (i = 0; i < mpr->mrs_n; i++)
- ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
- __ATOMIC_RELAXED) - 1 == 0;
+ ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
+ rte_memory_order_relaxed) - 1 == 0;
return ret;
}
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index aa10b68..a7f1042 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {
/* Multi-Packet RQ buffer header. */
struct __rte_cache_aligned mlx5_mprq_buf {
struct rte_mempool *mp;
- uint16_t refcnt; /* Atomically accessed refcnt. */
+ RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
struct rte_mbuf_ext_shared_info shinfos[];
/*
* Shared information per stride.
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index e69d068..4b95d35 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -81,14 +81,14 @@ struct mlx5_list *
while (entry != NULL) {
if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
if (reuse) {
- ret = __atomic_fetch_add(&entry->ref_cnt, 1,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
+ rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
l_const->name, (void *)entry,
entry->ref_cnt);
} else if (lcore_index < MLX5_LIST_GLOBAL) {
- ret = __atomic_load_n(&entry->ref_cnt,
- __ATOMIC_RELAXED);
+ ret = rte_atomic_load_explicit(&entry->ref_cnt,
+ rte_memory_order_relaxed);
}
if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
return entry;
@@ -151,13 +151,13 @@ struct mlx5_list_entry *
{
struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
- uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
- __ATOMIC_RELAXED);
+ uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
+ rte_memory_order_relaxed);
while (inv_cnt != 0 && entry != NULL) {
struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
- if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+ if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
LIST_REMOVE(entry, next);
if (l_const->lcores_share)
l_const->cb_clone_free(l_const->ctx, entry);
@@ -217,7 +217,7 @@ struct mlx5_list_entry *
entry->lcore_idx = (uint32_t)lcore_index;
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
l_const->name, lcore_index,
(void *)entry, entry->ref_cnt);
@@ -254,7 +254,7 @@ struct mlx5_list_entry *
l_inconst->gen_cnt++;
rte_rwlock_write_unlock(&l_inconst->lock);
LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
- __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
(void *)entry, entry->ref_cnt);
return local_entry;
@@ -285,7 +285,7 @@ struct mlx5_list_entry *
{
struct mlx5_list_entry *gentry = entry->gentry;
- if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
if (entry->lcore_idx == (uint32_t)lcore_idx) {
LIST_REMOVE(entry, next);
@@ -294,23 +294,23 @@ struct mlx5_list_entry *
else
l_const->cb_remove(l_const->ctx, entry);
} else {
- __atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+ 1, rte_memory_order_relaxed);
}
if (!l_const->lcores_share) {
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)entry);
return 0;
}
- if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+ if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
return 1;
rte_rwlock_write_lock(&l_inconst->lock);
if (likely(gentry->ref_cnt == 0)) {
LIST_REMOVE(gentry, next);
rte_rwlock_write_unlock(&l_inconst->lock);
l_const->cb_remove(l_const->ctx, gentry);
- __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
l_const->name, (void *)gentry);
return 0;
@@ -377,7 +377,7 @@ struct mlx5_list_entry *
mlx5_list_get_entry_num(struct mlx5_list *list)
{
MLX5_ASSERT(list);
- return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+ return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
}
/********************* Hash List **********************/
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index 44eba50..c5eff7a 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -29,7 +29,7 @@
*/
struct mlx5_list_entry {
LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
- alignas(8) uint32_t ref_cnt; /* 0 means, entry is invalid. */
+ alignas(8) RTE_ATOMIC(uint32_t) ref_cnt; /* 0 means, entry is invalid. */
uint32_t lcore_idx;
union {
struct mlx5_list_entry *gentry;
@@ -39,7 +39,7 @@ struct mlx5_list_entry {
struct __rte_cache_aligned mlx5_list_cache {
LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
- uint32_t inv_cnt; /* Invalid entries counter. */
+ RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
};
/**
@@ -111,7 +111,7 @@ struct mlx5_list_const {
struct mlx5_list_inconst {
rte_rwlock_t lock; /* read/write lock. */
volatile uint32_t gen_cnt; /* List modification may update it. */
- volatile uint32_t count; /* number of entries in list. */
+ volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
struct mlx5_list_cache *cache[MLX5_LIST_MAX];
/* Lcore cache, last index is the global cache. */
};
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index c58c41d..ef6dabe 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -16,7 +16,7 @@ struct mlx5_sys_mem {
uint32_t init:1; /* Memory allocator initialized. */
uint32_t enable:1; /* System memory select. */
uint32_t reserve:30; /* Reserve. */
- struct rte_memseg_list *last_msl;
+ RTE_ATOMIC(struct rte_memseg_list *) last_msl;
/* last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
uint64_t malloc_sys;
@@ -93,14 +93,14 @@ struct mlx5_sys_mem {
* different with the cached msl.
*/
if (addr && !mlx5_mem_check_msl(addr,
- (struct rte_memseg_list *)__atomic_load_n
- (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
- __atomic_store_n(&mlx5_sys_mem.last_msl,
+ (struct rte_memseg_list *)rte_atomic_load_explicit
+ (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
+ rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
rte_mem_virt2memseg_list(addr),
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
+ rte_memory_order_relaxed);
#endif
}
}
@@ -122,11 +122,11 @@ struct mlx5_sys_mem {
* to check if the memory belongs to rte memory.
*/
if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
- __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+ rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
if (!rte_mem_virt2memseg_list(addr))
return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
#endif
}
return true;
@@ -185,8 +185,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -199,8 +199,8 @@ struct mlx5_sys_mem {
addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- __atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return addr;
}
@@ -233,8 +233,8 @@ struct mlx5_sys_mem {
mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -246,8 +246,8 @@ struct mlx5_sys_mem {
new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- __atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
+ rte_memory_order_relaxed);
#endif
return new_addr;
}
@@ -259,14 +259,14 @@ struct mlx5_sys_mem {
return;
if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
+ rte_memory_order_relaxed);
#endif
mlx5_os_free(addr);
} else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- __atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
+ rte_memory_order_relaxed);
#endif
rte_free(addr);
}
@@ -280,14 +280,14 @@ struct mlx5_sys_mem {
" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
" update:%"PRIi64"",
- __atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
- __atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
+ rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
#endif
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
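Most of the mlx5 common conversion above repeats the same refcount idiom: a relaxed fetch_add to take a reference, and an "old value minus one equals zero" test on fetch_sub to detect the last release. A minimal sketch of just that idiom, with hypothetical names (demo_obj, demo_obj_get, demo_obj_put):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

struct demo_obj {
        RTE_ATOMIC(uint32_t) refcnt;
};

static inline void
demo_obj_get(struct demo_obj *o)
{
        rte_atomic_fetch_add_explicit(&o->refcnt, 1, rte_memory_order_relaxed);
}

/* Returns true when the caller dropped the last reference. */
static inline bool
demo_obj_put(struct demo_obj *o)
{
        return rte_atomic_fetch_sub_explicit(&o->refcnt, 1,
                                             rte_memory_order_relaxed) - 1 == 0;
}

Note that rte_atomic_fetch_sub_explicit(), like __atomic_fetch_sub(), returns the value before the subtraction, hence the explicit "- 1" in the comparison throughout the diff.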
* [PATCH v6 29/45] common/idpf: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (27 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 28/45] common/mlx5: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 30/45] common/iavf: " Tyler Retzlaff
` (16 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/idpf/idpf_common_device.h | 6 +++---
drivers/common/idpf/idpf_common_rxtx.c | 14 ++++++++------
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 16 ++++++++--------
4 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 3834c1f..bfa927a 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -48,7 +48,7 @@ struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
struct virtchnl2_get_capabilities caps;
- volatile uint32_t pend_cmd; /* pending command not finished */
+ volatile RTE_ATOMIC(uint32_t) pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from cp */
uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
@@ -179,8 +179,8 @@ struct idpf_cmd_info {
atomic_set_cmd(struct idpf_adapter *adapter, uint32_t ops)
{
uint32_t op_unk = VIRTCHNL2_OP_UNKNOWN;
- bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
- 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ bool ret = rte_atomic_compare_exchange_strong_explicit(&adapter->pend_cmd, &op_unk, ops,
+ rte_memory_order_acquire, rte_memory_order_acquire);
if (!ret)
DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 83b131e..b09c58c 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -592,8 +592,8 @@
next_avail = 0;
rx_bufq->nb_rx_hold -= delta;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
return;
@@ -612,8 +612,8 @@
next_avail += nb_refill;
rx_bufq->nb_rx_hold -= nb_refill;
} else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
}
@@ -1093,7 +1093,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(nmb == NULL)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -1203,7 +1204,8 @@
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed, 1,
+ rte_memory_order_relaxed);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index b49b1ed..eeeeed1 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -97,7 +97,7 @@
#define IDPF_RX_SPLIT_BUFQ2_ID 2
struct idpf_rx_stats {
- uint64_t mbuf_alloc_failed;
+ RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
struct idpf_rx_queue {
diff --git a/drivers/common/idpf/idpf_common_rxtx_avx512.c b/drivers/common/idpf/idpf_common_rxtx_avx512.c
index f65e8d5..3b5e124 100644
--- a/drivers/common/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/common/idpf/idpf_common_rxtx_avx512.c
@@ -38,8 +38,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -168,8 +168,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rxq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rxq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
@@ -564,8 +564,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
@@ -638,8 +638,8 @@
dma_addr0);
}
}
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- IDPF_RXQ_REARM_THRESH, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
return;
}
}
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
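The idpf change above is one of the few in the series that alters the call shape: __atomic_compare_exchange() took a pointer to the desired value, while rte_atomic_compare_exchange_strong_explicit() takes it by value. A minimal sketch of the command-claim pattern under that assumption, with hypothetical names (DEMO_OP_NONE, demo_claim_cmd):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

#define DEMO_OP_NONE 0u

static RTE_ATOMIC(uint32_t) demo_pend_cmd = DEMO_OP_NONE;

static inline bool
demo_claim_cmd(uint32_t op)
{
        uint32_t expected = DEMO_OP_NONE;

        /* Succeeds only if no command is outstanding; 'expected' is
         * updated with the current value on failure.
         */
        return rte_atomic_compare_exchange_strong_explicit(&demo_pend_cmd,
                        &expected, op,
                        rte_memory_order_acquire, rte_memory_order_acquire);
}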
* [PATCH v6 30/45] common/iavf: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (28 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 29/45] common/idpf: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 31/45] baseband/acc: " Tyler Retzlaff
` (15 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/iavf/iavf_impl.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/common/iavf/iavf_impl.c b/drivers/common/iavf/iavf_impl.c
index 8919b0e..c0ff301 100644
--- a/drivers/common/iavf/iavf_impl.c
+++ b/drivers/common/iavf/iavf_impl.c
@@ -18,7 +18,7 @@ enum iavf_status
u64 size,
u32 alignment)
{
- static uint64_t iavf_dma_memzone_id;
+ static RTE_ATOMIC(uint64_t) iavf_dma_memzone_id;
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
@@ -26,7 +26,7 @@ enum iavf_status
return IAVF_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "iavf_dma_%" PRIu64,
- __atomic_fetch_add(&iavf_dma_memzone_id, 1, __ATOMIC_RELAXED));
+ rte_atomic_fetch_add_explicit(&iavf_dma_memzone_id, 1, rte_memory_order_relaxed));
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
RTE_MEMZONE_IOVA_CONTIG, alignment,
RTE_PGSIZE_2M);
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
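The iavf change is a relaxed fetch_add on a static counter used only to build unique memzone names. A minimal sketch of the same idea, with a hypothetical name prefix (demo_dma_) rather than the real iavf one:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint64_t) demo_zone_id;

static inline void
demo_make_zone_name(char *buf, size_t len)
{
        /* Relaxed is enough: only uniqueness of the value matters. */
        snprintf(buf, len, "demo_dma_%" PRIu64,
                 rte_atomic_fetch_add_explicit(&demo_zone_id, 1,
                                               rte_memory_order_relaxed));
}

Relaxed ordering suffices because concurrent callers only need distinct values; the counter is not used to synchronize any other memory.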
* [PATCH v6 31/45] baseband/acc: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (29 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 30/45] common/iavf: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 32/45] net/txgbe: " Tyler Retzlaff
` (14 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/baseband/acc/rte_acc100_pmd.c | 36 +++++++++++++--------------
drivers/baseband/acc/rte_vrb_pmd.c | 47 +++++++++++++++++++++++------------
2 files changed, 49 insertions(+), 34 deletions(-)
diff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c
index 4f666e5..ee50b9c 100644
--- a/drivers/baseband/acc/rte_acc100_pmd.c
+++ b/drivers/baseband/acc/rte_acc100_pmd.c
@@ -3673,8 +3673,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3728,8 +3728,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3742,8 +3742,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3755,8 +3755,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n",
desc, rsp.val, descs_in_tb, desc->req.numCBs);
@@ -3793,8 +3793,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3846,8 +3846,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3902,8 +3902,8 @@
uint8_t cbs_in_tb = 1, cb_idx = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3919,8 +3919,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3930,8 +3930,8 @@
/* Read remaining CBs if exists */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n",
desc, rsp.val, cb_idx, cbs_in_tb);
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 88b1104..585dc49 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -3119,7 +3119,8 @@
desc_idx = acc_desc_idx_tail(q, *dequeued_descs);
desc = q->ring_addr + desc_idx;
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + desc->req.numCBs > max_requested_ops)
return -1;
@@ -3157,7 +3158,8 @@
struct rte_bbdev_enc_op *op;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3192,7 +3194,8 @@
uint16_t current_dequeued_descs = 0, descs_in_tb;
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
if (*dequeued_ops + 1 > max_requested_ops)
return -1;
@@ -3208,7 +3211,8 @@
/* Check if last CB in TB is ready to dequeue (and thus
* the whole TB) - checking sdone bit. If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3220,7 +3224,8 @@
while (i < descs_in_tb) {
desc = acc_desc_tail(q, *dequeued_descs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
@@ -3246,7 +3251,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3290,7 +3296,8 @@
struct rte_bbdev_dec_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3346,7 +3353,8 @@
uint32_t tb_crc_check = 0;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -3362,7 +3370,8 @@
/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.
* If not return.
*/
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_SDONE))
return -1;
@@ -3372,7 +3381,8 @@
/* Read remaining CBs if exists. */
while (cb_idx < cbs_in_tb) {
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc,
rsp.val, desc->rsp.add_info_0,
@@ -3790,7 +3800,8 @@
struct rte_bbdev_fft_op *op;
desc = acc_desc_tail(q, dequeued_cbs);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4116,7 +4127,8 @@
uint8_t descs_in_op, i;
desc = acc_desc_tail(q, dequeued_ops);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
/* Check fdone bit. */
if (!(atom_desc.rsp.val & ACC_FDONE))
@@ -4127,7 +4139,8 @@
/* Get last CB. */
last_desc = acc_desc_tail(q, dequeued_ops + descs_in_op - 1);
/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -4137,8 +4150,9 @@
for (i = 1; i < descs_in_op - 1; i++) {
last_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)
& q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,
- __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit(
+ (uint64_t __rte_atomic *)last_desc,
+ rte_memory_order_relaxed);
if (!(atom_desc.rsp.val & ACC_FDONE))
return -1;
}
@@ -4154,7 +4168,8 @@
for (i = 0; i < descs_in_op; i++) {
desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);
- atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);
+ atom_desc.atom_hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)desc,
+ rte_memory_order_relaxed);
rsp.val = atom_desc.rsp.val;
vrb_update_dequeued_operation(desc, rsp, &op->status, aq_dequeued, true, false);
--
1.8.3.1
^ permalink raw reply [flat|nested] 300+ messages in thread
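The acc conversion leaves the descriptor ring itself untyped and instead casts at the load site to a __rte_atomic-qualified pointer, which is what lets rte_atomic_load_explicit() accept a plain uint64_t header. A rough sketch of that poll pattern, with a hypothetical descriptor layout and done bit (demo_desc, DEMO_FDONE):

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

#define DEMO_FDONE (UINT64_C(1) << 0)

struct demo_desc {
        uint64_t hdr;      /* written by hardware or another agent */
        uint64_t payload;
};

static inline bool
demo_desc_done(struct demo_desc *desc)
{
        uint64_t hdr;

        /* Relaxed atomic load through a cast, as in the patch, so the
         * 64-bit header is read in one shot without tearing.
         */
        hdr = rte_atomic_load_explicit((uint64_t __rte_atomic *)&desc->hdr,
                                       rte_memory_order_relaxed);
        return (hdr & DEMO_FDONE) != 0;
}

The relaxed load only guarantees an untorn read of the header word; as in the driver, the FDONE/SDONE bit checks that follow provide the actual hand-off logic.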
* [PATCH v6 32/45] net/txgbe: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (30 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 31/45] baseband/acc: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 33/45] net/null: " Tyler Retzlaff
` (13 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/txgbe/txgbe_ethdev.c | 12 +++++++-----
drivers/net/txgbe/txgbe_ethdev.h | 2 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
3 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b75e889..a58f197 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -595,7 +595,7 @@ static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -2834,7 +2834,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
struct txgbe_adapter *ad = TXGBE_DEV_ADAPTER(dev);
uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
- while (__atomic_load_n(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ while (rte_atomic_load_explicit(&ad->link_thread_running, rte_memory_order_seq_cst)) {
msec_delay(1);
timeout--;
@@ -2859,7 +2859,7 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
rte_thread_detach(rte_thread_self());
txgbe_dev_setup_link_alarm_handler(dev);
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
return 0;
}
@@ -2908,7 +2908,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
} else if (hw->phy.media_type == txgbe_media_type_fiber &&
dev->data->dev_conf.intr_conf.lsc != 0) {
txgbe_dev_wait_setup_link_complete(dev, 0);
- if (!__atomic_test_and_set(&ad->link_thread_running, __ATOMIC_SEQ_CST)) {
+ if (!rte_atomic_exchange_explicit(&ad->link_thread_running, 1,
+ rte_memory_order_seq_cst)) {
/* To avoid race condition between threads, set
* the TXGBE_FLAG_NEED_LINK_CONFIG flag only
* when there is no link thread running.
@@ -2918,7 +2919,8 @@ static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
"txgbe-link",
txgbe_dev_setup_link_thread_handler, dev) < 0) {
PMD_DRV_LOG(ERR, "Create link thread failed!");
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0,
+ rte_memory_order_seq_cst);
}
} else {
PMD_DRV_LOG(ERR,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 7e8067c..e8f55f7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -372,7 +372,7 @@ struct txgbe_adapter {
/* For RSS reta table update */
uint8_t rss_reta_updated;
- uint32_t link_thread_running;
+ RTE_ATOMIC(uint32_t) link_thread_running;
rte_thread_t link_thread_tid;
};
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index f1341fb..1abc190 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -206,7 +206,7 @@ static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
return 0;
}
- __atomic_clear(&ad->link_thread_running, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&ad->link_thread_running, 0, rte_memory_order_seq_cst);
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->device_id = pci_dev->id.device_id;
--
1.8.3.1
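A minimal sketch of the flag conversion in the txgbe patch above: __atomic_test_and_set() becomes rte_atomic_exchange_explicit(), whose return value is the flag's previous contents, and __atomic_clear() becomes a store of 0. Assumes a DPDK build with <rte_stdatomic.h>; the function names are illustrative, not from the driver:

#include <stdint.h>
#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(uint32_t) link_thread_running;

/* Returns true if this caller won the right to start the thread. */
static inline bool
try_start_link_thread(void)
{
        /* exchange returns the previous value: 0 means we set it first */
        return rte_atomic_exchange_explicit(&link_thread_running, 1,
                        rte_memory_order_seq_cst) == 0;
}

static inline void
stop_link_thread(void)
{
        rte_atomic_store_explicit(&link_thread_running, 0,
                        rte_memory_order_seq_cst);
}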
* [PATCH v6 33/45] net/null: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (31 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 32/45] net/txgbe: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 34/45] event/dlb2: " Tyler Retzlaff
` (12 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/null/rte_eth_null.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 7c46004..f4ed3b8 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -37,8 +37,8 @@ struct null_queue {
struct rte_mempool *mb_pool;
struct rte_mbuf *dummy_packet;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
+ RTE_ATOMIC(uint64_t) rx_pkts;
+ RTE_ATOMIC(uint64_t) tx_pkts;
};
struct pmd_options {
@@ -102,7 +102,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -130,7 +130,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -155,7 +155,7 @@ struct pmd_internals {
rte_pktmbuf_free(bufs[i]);
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
@@ -178,7 +178,7 @@ struct pmd_internals {
}
/* NOTE: review for potential ordering optimization */
- __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&h->tx_pkts, i, rte_memory_order_seq_cst);
return i;
}
--
1.8.3.1
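The net/null change is mostly a type change: the per-queue counters become RTE_ATOMIC(uint64_t) so they can be passed to the rte_atomic_* helpers without casts. A minimal sketch of that pattern, assuming <rte_stdatomic.h>; the struct and function names are illustrative:

#include <stdint.h>
#include <rte_stdatomic.h>

struct null_q_counters {
        RTE_ATOMIC(uint64_t) rx_pkts;
        RTE_ATOMIC(uint64_t) tx_pkts;
};

/* Credit a burst of received packets to the per-queue counter. */
static inline void
count_rx(struct null_q_counters *c, uint16_t n)
{
        rte_atomic_fetch_add_explicit(&c->rx_pkts, n,
                        rte_memory_order_seq_cst);
}

/* Read the counter back for the stats callback. */
static inline uint64_t
read_rx(struct null_q_counters *c)
{
        return rte_atomic_load_explicit(&c->rx_pkts,
                        rte_memory_order_seq_cst);
}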
* [PATCH v6 34/45] event/dlb2: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (32 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 33/45] net/null: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 35/45] dma/idxd: " Tyler Retzlaff
` (11 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/event/dlb2/dlb2.c | 34 +++++++++++++++++-----------------
drivers/event/dlb2/dlb2_priv.h | 13 +++++--------
drivers/event/dlb2/dlb2_xstats.c | 2 +-
3 files changed, 23 insertions(+), 26 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 628ddef..0b91f03 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1005,7 +1005,7 @@ struct process_local_port_data
}
dlb2->new_event_limit = config->nb_events_limit;
- __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&dlb2->inflights, 0, rte_memory_order_seq_cst);
/* Save number of ports/queues for this event dev */
dlb2->num_ports = config->nb_event_ports;
@@ -2668,10 +2668,10 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
batch_size = credits;
if (likely(credits &&
- __atomic_compare_exchange_n(
+ rte_atomic_compare_exchange_strong_explicit(
qm_port->credit_pool[type],
- &credits, credits - batch_size, false,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+ &credits, credits - batch_size,
+ rte_memory_order_seq_cst, rte_memory_order_seq_cst)))
return batch_size;
else
return 0;
@@ -2687,7 +2687,7 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
/* Replenish credits, saving one quanta for enqueues */
uint16_t val = ev_port->inflight_credits - quanta;
- __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
ev_port->inflight_credits -= val;
}
}
@@ -2696,8 +2696,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
struct dlb2_eventdev_port *ev_port)
{
- uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
- __ATOMIC_SEQ_CST);
+ uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
+ rte_memory_order_seq_cst);
const int num = 1;
if (unlikely(ev_port->inflight_max < sw_inflights)) {
@@ -2719,8 +2719,8 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
return 1;
}
- __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
+ rte_memory_order_seq_cst);
ev_port->inflight_credits += (credit_update_quanta);
if (ev_port->inflight_credits < num) {
@@ -3234,17 +3234,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_ldb_credits += num;
if (qm_port->cached_ldb_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_LDB_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_ldb_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
@@ -3252,17 +3252,17 @@ static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
if (qm_port->dlb2->version == DLB2_HW_V2) {
qm_port->cached_dir_credits += num;
if (qm_port->cached_dir_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_DIR_QUEUE],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_dir_credits -= batch_size;
}
} else {
qm_port->cached_credits += num;
if (qm_port->cached_credits >= 2 * batch_size) {
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
qm_port->credit_pool[DLB2_COMBINED_POOL],
- batch_size, __ATOMIC_SEQ_CST);
+ batch_size, rte_memory_order_seq_cst);
qm_port->cached_credits -= batch_size;
}
}
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index 49f1c66..2470ae0 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -348,7 +348,7 @@ struct dlb2_port {
uint32_t dequeue_depth;
enum dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
- uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) *credit_pool[DLB2_NUM_QUEUE_TYPES];
union {
struct {
uint16_t cached_ldb_credits;
@@ -586,7 +586,7 @@ struct dlb2_eventdev {
uint32_t xstats_count_mode_dev;
uint32_t xstats_count_mode_port;
uint32_t xstats_count;
- uint32_t inflights; /* use __atomic builtins */
+ RTE_ATOMIC(uint32_t) inflights;
uint32_t new_event_limit;
int max_num_events_override;
int num_dir_credits_override;
@@ -623,15 +623,12 @@ struct dlb2_eventdev {
struct {
uint16_t max_ldb_credits;
uint16_t max_dir_credits;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t ldb_credit_pool;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t dir_credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) ldb_credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) dir_credit_pool;
};
struct {
uint16_t max_credits;
- /* use __atomic builtins */ /* shared hw cred */
- alignas(RTE_CACHE_LINE_SIZE) uint32_t credit_pool;
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) credit_pool;
};
};
uint32_t cos_ports[DLB2_COS_NUM_VALS]; /* total ldb ports in each class */
diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c
index ff15271..22094f3 100644
--- a/drivers/event/dlb2/dlb2_xstats.c
+++ b/drivers/event/dlb2/dlb2_xstats.c
@@ -173,7 +173,7 @@ struct dlb2_xstats_entry {
case nb_events_limit:
return dlb2->new_event_limit;
case inflight_events:
- return __atomic_load_n(&dlb2->inflights, __ATOMIC_SEQ_CST);
+ return rte_atomic_load_explicit(&dlb2->inflights, rte_memory_order_seq_cst);
case ldb_pool_size:
return dlb2->num_ldb_credits;
case dir_pool_size:
--
1.8.3.1
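The credit-pool path in the dlb2 patch above relies on compare-and-swap to claim shared hardware credits. A minimal sketch of that shape, assuming <rte_stdatomic.h>; claim_credits is an illustrative name and the sketch simplifies the driver's batching logic:

#include <stdint.h>
#include <rte_stdatomic.h>

/* Try to claim up to 'want' credits from a shared pool; returns the
 * number actually claimed (0 if the pool was empty or the CAS lost).
 */
static inline uint32_t
claim_credits(RTE_ATOMIC(uint32_t) *pool, uint32_t want)
{
        uint32_t avail = rte_atomic_load_explicit(pool,
                        rte_memory_order_seq_cst);
        uint32_t take = avail < want ? avail : want;

        if (take == 0)
                return 0;
        /* on failure the current pool value is written back into 'avail' */
        if (rte_atomic_compare_exchange_strong_explicit(pool, &avail,
                        avail - take,
                        rte_memory_order_seq_cst, rte_memory_order_seq_cst))
                return take;
        return 0;
}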
* [PATCH v6 35/45] dma/idxd: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (33 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 34/45] event/dlb2: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 36/45] crypto/ccp: " Tyler Retzlaff
` (10 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/dma/idxd/idxd_internal.h | 2 +-
drivers/dma/idxd/idxd_pci.c | 9 +++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index cd41777..537cf9b 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -33,7 +33,7 @@ struct idxd_pci_common {
rte_spinlock_t lk;
uint8_t wq_cfg_sz;
- uint16_t ref_count;
+ RTE_ATOMIC(uint16_t) ref_count;
volatile struct rte_idxd_bar0 *regs;
volatile uint32_t *wq_regs_base;
volatile struct rte_idxd_grpcfg *grp_regs;
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index a78889a..06fa115 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -136,7 +136,8 @@
* the PCI struct
*/
/* NOTE: review for potential ordering optimization */
- is_last_wq = (__atomic_fetch_sub(&idxd->u.pci->ref_count, 1, __ATOMIC_SEQ_CST) == 1);
+ is_last_wq = (rte_atomic_fetch_sub_explicit(&idxd->u.pci->ref_count, 1,
+ rte_memory_order_seq_cst) == 1);
if (is_last_wq) {
/* disable the device */
err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -330,9 +331,9 @@
return ret;
}
qid = rte_dma_get_dev_id_by_name(qname);
- max_qid = __atomic_load_n(
+ max_qid = rte_atomic_load_explicit(
&((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
- __ATOMIC_SEQ_CST);
+ rte_memory_order_seq_cst);
/* we have queue 0 done, now configure the rest of the queues */
for (qid = 1; qid < max_qid; qid++) {
@@ -389,7 +390,7 @@
free(idxd.u.pci);
return ret;
}
- __atomic_fetch_add(&idxd.u.pci->ref_count, 1, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&idxd.u.pci->ref_count, 1, rte_memory_order_seq_cst);
}
return 0;
--
1.8.3.1
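The idxd hunk uses the value returned by the atomic decrement to detect the last reference. A minimal sketch of that reference-count idiom, assuming <rte_stdatomic.h>; the struct and function names are illustrative, not from the driver:

#include <stdint.h>
#include <stdbool.h>
#include <rte_stdatomic.h>

struct pci_shared {
        RTE_ATOMIC(uint16_t) ref_count;
};

/* Take a reference on the shared PCI state. */
static inline void
shared_get(struct pci_shared *s)
{
        rte_atomic_fetch_add_explicit(&s->ref_count, 1,
                        rte_memory_order_seq_cst);
}

/* Drop a reference; returns true if this was the last holder, in which
 * case the caller is responsible for tearing the device down.
 */
static inline bool
shared_put(struct pci_shared *s)
{
        /* fetch_sub returns the value before the decrement */
        return rte_atomic_fetch_sub_explicit(&s->ref_count, 1,
                        rte_memory_order_seq_cst) == 1;
}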
* [PATCH v6 36/45] crypto/ccp: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (34 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 35/45] dma/idxd: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 37/45] common/cpt: " Tyler Retzlaff
` (9 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/crypto/ccp/ccp_dev.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index b7ca3af..41c1422 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -116,15 +116,15 @@ struct ccp_queue *
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ (1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
- __atomic_fetch_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)),
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_and_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
+ ~(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}
static inline uint32_t
--
1.8.3.1
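The ccp bitmap stays a plain unsigned long array, so the patch casts to the __rte_atomic-qualified type at each access rather than changing the declaration. A minimal sketch of that approach, assuming <rte_stdatomic.h>; WORD_OF/BIT_OF stand in for the driver's WORD_OFFSET/BIT_OFFSET helpers:

#include <limits.h>
#include <rte_stdatomic.h>

#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)
#define WORD_OF(n) ((n) / BITS_PER_WORD)
#define BIT_OF(n)  ((n) % BITS_PER_WORD)

/* Atomically set bit n in a bitmap declared as plain unsigned long[]. */
static inline void
bitmap_set(unsigned long *bitmap, unsigned int n)
{
        rte_atomic_fetch_or_explicit(
                (unsigned long __rte_atomic *)&bitmap[WORD_OF(n)],
                1UL << BIT_OF(n), rte_memory_order_seq_cst);
}

/* Atomically clear bit n in the same bitmap. */
static inline void
bitmap_clear(unsigned long *bitmap, unsigned int n)
{
        rte_atomic_fetch_and_explicit(
                (unsigned long __rte_atomic *)&bitmap[WORD_OF(n)],
                ~(1UL << BIT_OF(n)), rte_memory_order_seq_cst);
}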
* [PATCH v6 37/45] common/cpt: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (35 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 36/45] crypto/ccp: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 38/45] bus/vmbus: " Tyler Retzlaff
` (8 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/common/cpt/cpt_common.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index 6596cc0..dee430f 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -73,7 +73,7 @@ struct __rte_aligned(8) cpt_request_info {
const unsigned int qsize)
{
/* Ensure ordering between setting the entry and updating the tail */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
q->tail = (q->tail + cnt) & (qsize - 1);
}
--
1.8.3.1
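The cpt change is a one-line fence conversion: the release fence orders the entry writes before the tail update that publishes them to the consumer. A minimal sketch of that producer-side pattern, assuming <rte_stdatomic.h>; the queue layout shown is illustrative, not the driver's:

#include <stdint.h>
#include <rte_stdatomic.h>

struct sw_queue {
        uint32_t tail;
        void *entries[1024];
};

/* Publish 'cnt' new entries: make the entry writes visible before the
 * consumer can observe the advanced tail. qsize must be a power of two.
 */
static inline void
queue_publish(struct sw_queue *q, uint32_t cnt, uint32_t qsize)
{
        /* order prior stores to q->entries[] before the tail update */
        rte_atomic_thread_fence(rte_memory_order_release);
        q->tail = (q->tail + cnt) & (qsize - 1);
}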
* [PATCH v6 38/45] bus/vmbus: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (36 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 37/45] common/cpt: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 39/45] examples: " Tyler Retzlaff
` (7 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/bus/vmbus/rte_vmbus_reg.h | 2 +-
drivers/bus/vmbus/vmbus_channel.c | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h
index a17ce40..e3299aa 100644
--- a/drivers/bus/vmbus/rte_vmbus_reg.h
+++ b/drivers/bus/vmbus/rte_vmbus_reg.h
@@ -28,7 +28,7 @@ struct vmbus_message {
*/
struct vmbus_mon_trig {
- uint32_t pending;
+ RTE_ATOMIC(uint32_t) pending;
uint32_t armed;
} __rte_packed;
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
index 4d74df3..925c2aa 100644
--- a/drivers/bus/vmbus/vmbus_channel.c
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -19,16 +19,16 @@
#include "private.h"
static inline void
-vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+vmbus_sync_set_bit(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
- /* Use GCC builtin which atomic does atomic OR operation */
- __atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}
static inline void
vmbus_set_monitor(const struct vmbus_channel *channel, uint32_t monitor_id)
{
- uint32_t *monitor_addr, monitor_mask;
+ RTE_ATOMIC(uint32_t) *monitor_addr;
+ uint32_t monitor_mask;
unsigned int trigger_index;
trigger_index = monitor_id / HV_MON_TRIG_LEN;
--
1.8.3.1
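Once a field is declared RTE_ATOMIC(), helpers that take its address must carry the qualifier in their parameter types, which is why the vmbus patch changes vmbus_sync_set_bit()'s signature instead of casting at the call site. A minimal sketch, assuming <rte_stdatomic.h>; the names are illustrative:

#include <stdint.h>
#include <rte_stdatomic.h>

struct mon_trigger {
        RTE_ATOMIC(uint32_t) pending;
        uint32_t armed;
};

/* The qualified parameter type means callers pass &field directly. */
static inline void
sync_set_bits(volatile RTE_ATOMIC(uint32_t) *addr, uint32_t mask)
{
        rte_atomic_fetch_or_explicit(addr, mask, rte_memory_order_seq_cst);
}

static inline void
trigger_pending(struct mon_trigger *t, uint32_t mask)
{
        sync_set_bits(&t->pending, mask);
}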
* [PATCH v6 39/45] examples: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (37 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 38/45] bus/vmbus: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 40/45] app/dumpcap: " Tyler Retzlaff
` (6 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
examples/bbdev_app/main.c | 13 +++++----
examples/l2fwd-event/l2fwd_common.h | 4 +--
examples/l2fwd-event/l2fwd_event.c | 24 ++++++++--------
examples/l2fwd-jobstats/main.c | 11 ++++----
.../client_server_mp/mp_server/main.c | 6 ++--
examples/server_node_efd/efd_server/main.c | 6 ++--
examples/vhost/main.c | 32 +++++++++++-----------
examples/vhost/main.h | 4 +--
examples/vhost/virtio_net.c | 13 +++++----
examples/vhost_blk/vhost_blk.c | 8 +++---
examples/vm_power_manager/channel_manager.h | 4 ++-
examples/vm_power_manager/channel_monitor.c | 9 +++---
examples/vm_power_manager/vm_power_cli.c | 3 +-
13 files changed, 73 insertions(+), 64 deletions(-)
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index d4c686c..7124b49 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -165,7 +165,7 @@ struct stats_lcore_params {
.num_dec_cores = 1,
};
-static uint16_t global_exit_flag;
+static RTE_ATOMIC(uint16_t) global_exit_flag;
/* display usage */
static inline void
@@ -277,7 +277,7 @@ uint16_t bbdev_parse_number(const char *mask)
signal_handler(int signum)
{
printf("\nSignal %d received\n", signum);
- __atomic_store_n(&global_exit_flag, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 1, rte_memory_order_relaxed);
}
static void
@@ -321,7 +321,8 @@ uint16_t bbdev_parse_number(const char *mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME &&
- !__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED); count++) {
+ !rte_atomic_load_explicit(&global_exit_flag,
+ rte_memory_order_relaxed); count++) {
memset(&link, 0, sizeof(link));
link_get_err = rte_eth_link_get_nowait(port_id, &link);
@@ -675,7 +676,7 @@ uint16_t bbdev_parse_number(const char *mask)
{
struct stats_lcore_params *stats_lcore = arg;
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
print_stats(stats_lcore);
rte_delay_ms(500);
}
@@ -921,7 +922,7 @@ uint16_t bbdev_parse_number(const char *mask)
const bool run_decoder = (lcore_conf->core_type &
(1 << RTE_BBDEV_OP_TURBO_DEC));
- while (!__atomic_load_n(&global_exit_flag, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&global_exit_flag, rte_memory_order_relaxed)) {
if (run_encoder)
run_encoding(lcore_conf);
if (run_decoder)
@@ -1055,7 +1056,7 @@ uint16_t bbdev_parse_number(const char *mask)
.align = alignof(struct rte_mbuf *),
};
- __atomic_store_n(&global_exit_flag, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&global_exit_flag, 0, rte_memory_order_relaxed);
sigret = signal(SIGTERM, signal_handler);
if (sigret == SIG_ERR)
diff --git a/examples/l2fwd-event/l2fwd_common.h b/examples/l2fwd-event/l2fwd_common.h
index c56b3e7..8cf91b9 100644
--- a/examples/l2fwd-event/l2fwd_common.h
+++ b/examples/l2fwd-event/l2fwd_common.h
@@ -61,8 +61,8 @@
/* Per-port statistics struct */
struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t dropped;
- uint64_t tx;
- uint64_t rx;
+ RTE_ATOMIC(uint64_t) tx;
+ RTE_ATOMIC(uint64_t) rx;
};
/* Event vector attributes */
diff --git a/examples/l2fwd-event/l2fwd_event.c b/examples/l2fwd-event/l2fwd_event.c
index 4b5a032..2247202 100644
--- a/examples/l2fwd-event/l2fwd_event.c
+++ b/examples/l2fwd-event/l2fwd_event.c
@@ -163,8 +163,8 @@
dst_port = rsrc->dst_ports[mbuf->port];
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].rx,
+ 1, rte_memory_order_relaxed);
mbuf->port = dst_port;
if (flags & L2FWD_EVENT_UPDT_MAC)
@@ -179,8 +179,8 @@
rte_event_eth_tx_adapter_txq_set(mbuf, 0);
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
- 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbuf->port].tx,
+ 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -367,8 +367,8 @@
vec->queue = 0;
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[mbufs[0]->port].rx,
+ vec->nb_elem, rte_memory_order_relaxed);
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (j < vec->nb_elem)
@@ -382,14 +382,14 @@
}
if (timer_period > 0)
- __atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
- vec->nb_elem, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&rsrc->port_stats[vec->port].tx,
+ vec->nb_elem, rte_memory_order_relaxed);
} else {
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].rx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
if (j < vec->nb_elem)
rte_prefetch0(
@@ -406,9 +406,9 @@
rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
if (timer_period > 0)
- __atomic_fetch_add(
+ rte_atomic_fetch_add_explicit(
&rsrc->port_stats[mbufs[i]->port].tx, 1,
- __ATOMIC_RELAXED);
+ rte_memory_order_relaxed);
}
}
}
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index cb7582a..308b8ed 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -80,7 +80,7 @@ struct __rte_cache_aligned lcore_queue_conf {
struct rte_jobstats idle_job;
struct rte_jobstats_context jobs_context;
- uint16_t stats_read_pending;
+ RTE_ATOMIC(uint16_t) stats_read_pending;
rte_spinlock_t lock;
};
/* >8 End of list of queues to be polled for given lcore. */
@@ -151,9 +151,9 @@ struct __rte_cache_aligned l2fwd_port_statistics {
uint64_t collection_time = rte_get_timer_cycles();
/* Ask forwarding thread to give us stats. */
- __atomic_store_n(&qconf->stats_read_pending, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
rte_spinlock_lock(&qconf->lock);
- __atomic_store_n(&qconf->stats_read_pending, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);
/* Collect context statistics. */
stats_period = ctx->state_time - ctx->start_time;
@@ -522,8 +522,9 @@ struct __rte_cache_aligned l2fwd_port_statistics {
repeats++;
need_manage = qconf->flush_timer.expire < now;
/* Check if we was esked to give a stats. */
- stats_read_pending = __atomic_load_n(&qconf->stats_read_pending,
- __ATOMIC_RELAXED);
+ stats_read_pending = rte_atomic_load_explicit(
+ &qconf->stats_read_pending,
+ rte_memory_order_relaxed);
need_manage |= stats_read_pending;
for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index f54bb8b..ebfc2fe 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -157,12 +157,12 @@ struct client_rx_buf {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
uint32_t status = 0;
/* Only one core should display stats */
- if (__atomic_compare_exchange_n(&display_stats, &status, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &status, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/server_node_efd/efd_server/main.c b/examples/server_node_efd/efd_server/main.c
index fd72882..75ff0ea 100644
--- a/examples/server_node_efd/efd_server/main.c
+++ b/examples/server_node_efd/efd_server/main.c
@@ -177,12 +177,12 @@ struct efd_stats {
sleep_lcore(__rte_unused void *dummy)
{
/* Used to pick a display thread - static, so zero-initialised */
- static uint32_t display_stats;
+ static RTE_ATOMIC(uint32_t) display_stats;
/* Only one core should display stats */
uint32_t display_init = 0;
- if (__atomic_compare_exchange_n(&display_stats, &display_init, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_stats, &display_init, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
const unsigned int sleeptime = 1;
printf("Core %u displaying statistics\n", rte_lcore_id());
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 3fc1b15..4391d88 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1052,10 +1052,10 @@ static unsigned check_ports_num(unsigned nb_ports)
}
if (enable_stats) {
- __atomic_fetch_add(&dst_vdev->stats.rx_total_atomic, 1,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&dst_vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_total_atomic, 1,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&dst_vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
src_vdev->stats.tx_total++;
src_vdev->stats.tx += ret;
}
@@ -1072,10 +1072,10 @@ static unsigned check_ports_num(unsigned nb_ports)
ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, nr_xmit,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, ret,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, nr_xmit,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, ret,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1404,10 +1404,10 @@ static void virtio_tx_offload(struct rte_mbuf *m)
}
if (enable_stats) {
- __atomic_fetch_add(&vdev->stats.rx_total_atomic, rx_count,
- __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&vdev->stats.rx_atomic, enqueue_count,
- __ATOMIC_SEQ_CST);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_total_atomic, rx_count,
+ rte_memory_order_seq_cst);
+ rte_atomic_fetch_add_explicit(&vdev->stats.rx_atomic, enqueue_count,
+ rte_memory_order_seq_cst);
}
if (!dma_bind[vid2socketid[vdev->vid]].dmas[VIRTIO_RXQ].async_enabled) {
@@ -1832,10 +1832,10 @@ uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- rx_total = __atomic_load_n(&vdev->stats.rx_total_atomic,
- __ATOMIC_SEQ_CST);
- rx = __atomic_load_n(&vdev->stats.rx_atomic,
- __ATOMIC_SEQ_CST);
+ rx_total = rte_atomic_load_explicit(&vdev->stats.rx_total_atomic,
+ rte_memory_order_seq_cst);
+ rx = rte_atomic_load_explicit(&vdev->stats.rx_atomic,
+ rte_memory_order_seq_cst);
rx_dropped = rx_total - rx;
printf("Statistics for device %d\n"
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index c1c9a42..c986cbc 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -22,8 +22,8 @@
struct device_statistics {
uint64_t tx;
uint64_t tx_total;
- uint64_t rx_atomic;
- uint64_t rx_total_atomic;
+ RTE_ATOMIC(uint64_t) rx_atomic;
+ RTE_ATOMIC(uint64_t) rx_total_atomic;
};
struct vhost_queue {
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index 514c8e0..55af6e7 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -198,7 +198,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- avail_idx = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE);
+ avail_idx = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire);
start_idx = queue->last_used_idx;
free_entries = avail_idx - start_idx;
count = RTE_MIN(count, free_entries);
@@ -231,7 +232,8 @@
rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
}
- __atomic_fetch_add(&vr->used->idx, count, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, count,
+ rte_memory_order_release);
queue->last_used_idx += count;
rte_vhost_vring_call(dev->vid, queue_id);
@@ -386,8 +388,8 @@
queue = &dev->queues[queue_id];
vr = &queue->vr;
- free_entries = __atomic_load_n(&vr->avail->idx, __ATOMIC_ACQUIRE) -
- queue->last_avail_idx;
+ free_entries = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
+ rte_memory_order_acquire) - queue->last_avail_idx;
if (free_entries == 0)
return 0;
@@ -442,7 +444,8 @@
queue->last_avail_idx += i;
queue->last_used_idx += i;
- __atomic_fetch_add(&vr->used->idx, i, __ATOMIC_ACQ_REL);
+ rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, i,
+ rte_memory_order_acq_rel);
rte_vhost_vring_call(dev->vid, queue_id);
diff --git a/examples/vhost_blk/vhost_blk.c b/examples/vhost_blk/vhost_blk.c
index 376f7b8..03f1ac9 100644
--- a/examples/vhost_blk/vhost_blk.c
+++ b/examples/vhost_blk/vhost_blk.c
@@ -85,9 +85,9 @@ struct vhost_blk_ctrlr *
*/
used->ring[used->idx & (vq->vring.size - 1)].id = task->req_idx;
used->ring[used->idx & (vq->vring.size - 1)].len = task->data_len;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
used->idx++;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_split(task->ctrlr->vid,
vq->id, used->idx, task->req_idx);
@@ -111,12 +111,12 @@ struct vhost_blk_ctrlr *
desc->id = task->buffer_id;
desc->addr = 0;
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
if (vq->used_wrap_counter)
desc->flags |= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED;
else
desc->flags &= ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
- rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
rte_vhost_clr_inflight_desc_packed(task->ctrlr->vid, vq->id,
task->inflight_idx);
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index 7038e9d..eb989b2 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -13,6 +13,8 @@
#include <linux/un.h>
#include <stdbool.h>
+#include <rte_stdatomic.h>
+
/* Maximum name length including '\0' terminator */
#define CHANNEL_MGR_MAX_NAME_LEN 64
@@ -58,7 +60,7 @@ enum channel_type {
*/
struct channel_info {
char channel_path[UNIX_PATH_MAX]; /**< Path to host socket */
- volatile uint32_t status; /**< Connection status(enum channel_status) */
+ volatile RTE_ATOMIC(uint32_t) status; /**< Connection status(enum channel_status) */
int fd; /**< AF_UNIX socket fd */
unsigned channel_num; /**< CHANNEL_MGR_SOCKET_PATH/<vm_name>.channel_num */
enum channel_type type; /**< Binary, ini, json, etc. */
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 5fef268..d384c86 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -828,8 +828,9 @@ void channel_monitor_exit(void)
return -1;
uint32_t channel_connected = CHANNEL_MGR_CHANNEL_CONNECTED;
- if (__atomic_compare_exchange_n(&(chan_info->status), &channel_connected,
- CHANNEL_MGR_CHANNEL_PROCESSING, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED) == 0)
+ if (rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_connected,
+ CHANNEL_MGR_CHANNEL_PROCESSING, rte_memory_order_relaxed,
+ rte_memory_order_relaxed) == 0)
return -1;
if (pkt->command == RTE_POWER_CPU_POWER) {
@@ -934,8 +935,8 @@ void channel_monitor_exit(void)
* from management thread
*/
uint32_t channel_processing = CHANNEL_MGR_CHANNEL_PROCESSING;
- __atomic_compare_exchange_n(&(chan_info->status), &channel_processing,
- CHANNEL_MGR_CHANNEL_CONNECTED, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ rte_atomic_compare_exchange_strong_explicit(&(chan_info->status), &channel_processing,
+ CHANNEL_MGR_CHANNEL_CONNECTED, rte_memory_order_relaxed, rte_memory_order_relaxed);
return 0;
}
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index 1a55e55..c078325 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -74,7 +74,8 @@ struct cmd_show_vm_result {
for (i = 0; i < info.num_channels; i++) {
cmdline_printf(cl, " [%u]: %s, status = ", i,
info.channels[i].channel_path);
- switch (info.channels[i].status) {
+ switch (rte_atomic_load_explicit(&info.channels[i].status,
+ rte_memory_order_relaxed)) {
case CHANNEL_MGR_CHANNEL_CONNECTED:
cmdline_printf(cl, "CONNECTED\n");
break;
--
1.8.3.1
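The vhost example is the trickier case in this patch: the split-ring index fields belong to shared vring headers, so they stay plain uint16_t and each access casts to the qualified type, with acquire ordering when reading the guest's avail index and release ordering when publishing the used index. A minimal sketch of that pair, assuming <rte_stdatomic.h>; the function names are illustrative:

#include <stdint.h>
#include <rte_stdatomic.h>

/* Acquire-load the avail index so descriptor reads that follow see the
 * entries the guest made available before bumping the index.
 */
static inline uint16_t
ring_avail_idx(uint16_t *avail_idx)
{
        return rte_atomic_load_explicit(
                (uint16_t __rte_atomic *)avail_idx,
                rte_memory_order_acquire);
}

/* Release-add to the used index so the used-ring writes made earlier
 * are visible before the guest observes the new index.
 */
static inline void
ring_push_used(uint16_t *used_idx, uint16_t count)
{
        rte_atomic_fetch_add_explicit(
                (uint16_t __rte_atomic *)used_idx, count,
                rte_memory_order_release);
}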
* [PATCH v6 40/45] app/dumpcap: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (38 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 39/45] examples: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 41/45] app/test: " Tyler Retzlaff
` (5 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/dumpcap/main.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/app/dumpcap/main.c b/app/dumpcap/main.c
index cc0f66b..b25b95e 100644
--- a/app/dumpcap/main.c
+++ b/app/dumpcap/main.c
@@ -51,7 +51,7 @@
/* command line flags */
static const char *progname;
-static bool quit_signal;
+static RTE_ATOMIC(bool) quit_signal;
static bool group_read;
static bool quiet;
static bool use_pcapng = true;
@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv)
static void
signal_handler(int sig_num __rte_unused)
{
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
@@ -490,7 +490,7 @@ static void statistics_loop(void)
printf("%-15s %10s %10s\n",
"Interface", "Received", "Dropped");
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
RTE_ETH_FOREACH_DEV(p) {
if (rte_eth_dev_get_name_by_port(p, name) < 0)
continue;
@@ -528,7 +528,7 @@ static void statistics_loop(void)
static void
monitor_primary(void *arg __rte_unused)
{
- if (__atomic_load_n(&quit_signal, __ATOMIC_RELAXED))
+ if (rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed))
return;
if (rte_eal_primary_proc_alive(NULL)) {
@@ -536,7 +536,7 @@ static void statistics_loop(void)
} else {
fprintf(stderr,
"Primary process is no longer active, exiting...\n");
- __atomic_store_n(&quit_signal, true, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&quit_signal, true, rte_memory_order_relaxed);
}
}
@@ -983,7 +983,7 @@ int main(int argc, char **argv)
show_count(0);
}
- while (!__atomic_load_n(&quit_signal, __ATOMIC_RELAXED)) {
+ while (!rte_atomic_load_explicit(&quit_signal, rte_memory_order_relaxed)) {
if (process_ring(out, r) < 0) {
fprintf(stderr, "pcapng file write failed; %s\n",
strerror(errno));
--
1.8.3.1
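The dumpcap conversion is the common signal-flag pattern: only the flag itself is shared, so relaxed ordering is sufficient and the main loop just needs to eventually observe the store. A minimal sketch, assuming <rte_stdatomic.h>; names are illustrative and the loop body is elided:

#include <stdbool.h>
#include <rte_stdatomic.h>

static RTE_ATOMIC(bool) quit;

/* Signal handler: a relaxed store is enough, no other data is published. */
static void
on_signal(int sig)
{
        (void)sig;
        rte_atomic_store_explicit(&quit, true, rte_memory_order_relaxed);
}

static void
poll_loop(void)
{
        while (!rte_atomic_load_explicit(&quit, rte_memory_order_relaxed)) {
                /* process one batch of packets here */
        }
}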
* [PATCH v6 41/45] app/test: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (39 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 40/45] app/dumpcap: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 42/45] app/test-eventdev: " Tyler Retzlaff
` (4 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test/test_bpf.c | 46 ++++++++-----
app/test/test_distributor.c | 114 ++++++++++++++++-----------------
app/test/test_distributor_perf.c | 4 +-
app/test/test_func_reentrancy.c | 28 ++++----
app/test/test_hash_multiwriter.c | 16 ++---
app/test/test_hash_readwrite.c | 74 ++++++++++-----------
app/test/test_hash_readwrite_lf_perf.c | 88 ++++++++++++-------------
app/test/test_lcores.c | 25 ++++----
app/test/test_lpm_perf.c | 14 ++--
app/test/test_mcslock.c | 12 ++--
app/test/test_mempool_perf.c | 9 +--
app/test/test_pflock.c | 13 ++--
app/test/test_pmd_perf.c | 10 +--
app/test/test_rcu_qsbr_perf.c | 114 +++++++++++++++++----------------
app/test/test_ring_perf.c | 11 ++--
app/test/test_ring_stress_impl.h | 10 +--
app/test/test_rwlock.c | 9 +--
app/test/test_seqlock.c | 6 +-
app/test/test_service_cores.c | 24 +++----
app/test/test_spinlock.c | 9 +--
app/test/test_stack_perf.c | 12 ++--
app/test/test_threads.c | 33 +++++-----
app/test/test_ticketlock.c | 9 +--
app/test/test_timer.c | 31 +++++----
24 files changed, 378 insertions(+), 343 deletions(-)
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 53e3a31..2e43442 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -39,8 +39,8 @@
*/
struct dummy_offset {
- uint64_t u64;
- uint32_t u32;
+ RTE_ATOMIC(uint64_t) u64;
+ RTE_ATOMIC(uint32_t) u32;
uint16_t u16;
uint8_t u8;
};
@@ -1581,32 +1581,46 @@ struct bpf_test {
memset(&dfe, 0, sizeof(dfe));
rv = 1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = -1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = (int32_t)TEST_FILL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_1;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_MUL_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_2;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
rv = TEST_JCC_3;
- __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
- __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv,
+ rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv,
+ rte_memory_order_relaxed);
return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}
diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index 13357b9..60fe96e 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -47,14 +47,14 @@ struct worker_params {
struct worker_params worker_params;
/* statics - all zero-initialized by default */
-static volatile int quit; /**< general quit variable for all threads */
-static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
-static volatile int zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
-static volatile unsigned worker_idx;
-static volatile unsigned zero_idx;
+static volatile RTE_ATOMIC(int) quit; /**< general quit variable for all threads */
+static volatile RTE_ATOMIC(int) zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile RTE_ATOMIC(int) zero_sleep; /**< thr0 has quit basic loop and is sleeping*/
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
+static volatile RTE_ATOMIC(unsigned int) zero_idx;
struct __rte_cache_aligned worker_stats {
- volatile unsigned handled_packets;
+ volatile RTE_ATOMIC(unsigned int) handled_packets;
};
struct worker_stats worker_stats[RTE_MAX_LCORE];
@@ -66,8 +66,8 @@ struct __rte_cache_aligned worker_stats {
{
unsigned i, count = 0;
for (i = 0; i < worker_idx; i++)
- count += __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED);
+ count += rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed);
return count;
}
@@ -77,8 +77,8 @@ struct __rte_cache_aligned worker_stats {
{
unsigned int i;
for (i = 0; i < RTE_MAX_LCORE; i++)
- __atomic_store_n(&worker_stats[i].handled_packets, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&worker_stats[i].handled_packets, 0,
+ rte_memory_order_relaxed);
}
/* this is the basic worker function for sanity test
@@ -91,17 +91,17 @@ struct __rte_cache_aligned worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -162,8 +162,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with all zero hashes done.\n");
/* pick two flows and check they go correctly */
@@ -189,9 +189,9 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(
+ rte_atomic_load_explicit(
&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_memory_order_relaxed));
printf("Sanity test with two hash values done\n");
}
@@ -218,8 +218,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
printf("Sanity test with non-zero hashes done\n");
rte_mempool_put_bulk(p, (void *)bufs, BURST);
@@ -311,18 +311,18 @@ struct __rte_cache_aligned worker_stats {
struct rte_distributor *d = wp->dist;
unsigned int i;
unsigned int num;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
rte_pktmbuf_free(buf[i]);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(d, id, buf, num);
return 0;
}
@@ -381,51 +381,51 @@ struct __rte_cache_aligned worker_stats {
unsigned int num;
unsigned int zero_id = 0;
unsigned int zero_unset;
- const unsigned int id = __atomic_fetch_add(&worker_idx, 1,
- __ATOMIC_RELAXED);
+ const unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
/* wait for quit single globally, or for worker zero, wait
* for zero_quit */
while (!quit && !(id == zero_id && zero_quit)) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
if (num > 0) {
zero_unset = RTE_MAX_LCORE;
- __atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
- false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ rte_atomic_compare_exchange_strong_explicit(&zero_idx, &zero_unset, id,
+ rte_memory_order_acq_rel, rte_memory_order_acquire);
}
- zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+ zero_id = rte_atomic_load_explicit(&zero_idx, rte_memory_order_acquire);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
if (id == zero_id) {
rte_distributor_return_pkt(d, id, NULL, 0);
/* for worker zero, allow it to restart to pick up last packet
* when all workers are shutting down.
*/
- __atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 1, rte_memory_order_release);
while (zero_quit)
usleep(100);
- __atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&zero_sleep, 0, rte_memory_order_release);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets,
- num, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets,
+ num, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
}
}
@@ -491,17 +491,17 @@ struct __rte_cache_aligned worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST * 2) {
printf("Line %d: Error, not all packets flushed. "
@@ -560,18 +560,18 @@ struct __rte_cache_aligned worker_stats {
/* flush the distributor */
rte_distributor_flush(d);
- while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (!rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_distributor_flush(d);
zero_quit = 0;
- while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+ while (rte_atomic_load_explicit(&zero_sleep, rte_memory_order_acquire))
rte_delay_us(100);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
@@ -596,18 +596,18 @@ struct __rte_cache_aligned worker_stats {
struct worker_params *wp = arg;
struct rte_distributor *db = wp->dist;
unsigned int num, i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
while (!quit) {
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
for (i = 0; i < num; i++)
*seq_field(buf[i]) += id + 1;
num = rte_distributor_get_pkt(db, id,
buf, buf, num);
}
- __atomic_fetch_add(&worker_stats[id].handled_packets, num,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&worker_stats[id].handled_packets, num,
+ rte_memory_order_relaxed);
rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -679,8 +679,8 @@ struct __rte_cache_aligned worker_stats {
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
- __atomic_load_n(&worker_stats[i].handled_packets,
- __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&worker_stats[i].handled_packets,
+ rte_memory_order_relaxed));
/* Sort returned packets by sent order (sequence numbers). */
for (i = 0; i < buf_count; i++) {
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index c0ad39d..e678aec 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -31,7 +31,7 @@
/* static vars - zero initialized by default */
static volatile int quit;
-static volatile unsigned worker_idx;
+static volatile RTE_ATOMIC(unsigned int) worker_idx;
struct __rte_cache_aligned worker_stats {
volatile unsigned handled_packets;
@@ -121,7 +121,7 @@ struct __rte_cache_aligned worker_stats {
struct rte_distributor *d = arg;
unsigned int num = 0;
int i;
- unsigned int id = __atomic_fetch_add(&worker_idx, 1, __ATOMIC_RELAXED);
+ unsigned int id = rte_atomic_fetch_add_explicit(&worker_idx, 1, rte_memory_order_relaxed);
alignas(RTE_CACHE_LINE_SIZE) struct rte_mbuf *buf[8];
for (i = 0; i < 8; i++)
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 9296de2..bae39af 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -53,12 +53,13 @@
#define MAX_LCORES (rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))
-static uint32_t obj_count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) obj_count;
+static RTE_ATOMIC(uint32_t) synchro;
#define WAIT_SYNCHRO_FOR_WORKERS() do { \
if (lcore_self != rte_get_main_lcore()) \
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, \
+ rte_memory_order_relaxed); \
} while(0)
/*
@@ -71,7 +72,8 @@
WAIT_SYNCHRO_FOR_WORKERS();
- __atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silent the check in the caller */
+ /* silent the check in the caller */
+ rte_atomic_store_explicit(&obj_count, 1, rte_memory_order_relaxed);
if (rte_eal_init(0, NULL) != -1)
return -1;
@@ -113,7 +115,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
if (rp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -178,7 +180,7 @@
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create/lookup new ring several times */
@@ -244,7 +246,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_hash_create(&hash_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple times simultaneously */
@@ -311,7 +313,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_fbk_hash_create(&fbk_params);
if (handle != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -376,7 +378,7 @@
for (i = 0; i < MAX_ITER_ONCE; i++) {
lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
if (lpm != NULL)
- __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
}
/* create multiple fbk tables simultaneously */
@@ -437,8 +439,8 @@ struct test_case test_cases[] = {
if (pt_case->func == NULL)
return -1;
- __atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&obj_count, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
@@ -448,7 +450,7 @@ struct test_case test_cases[] = {
rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
}
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
@@ -463,7 +465,7 @@ struct test_case test_cases[] = {
pt_case->clean(lcore_id);
}
- count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
+ count = rte_atomic_load_explicit(&obj_count, rte_memory_order_relaxed);
if (count != 1) {
printf("%s: common object allocated %d times (should be 1)\n",
pt_case->name, count);
diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
index ed9dd41..33d3147 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/app/test/test_hash_multiwriter.c
@@ -43,8 +43,8 @@ struct {
const uint32_t nb_total_tsx_insertion = 4.5*1024*1024;
uint32_t rounded_nb_total_tsx_insertion;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
static int use_htm;
@@ -84,8 +84,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)
tbl_multiwriter_test_params.keys[i]
@@ -166,8 +166,8 @@ struct {
tbl_multiwriter_test_params.found = found;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
/* Get list of enabled cores */
i = 0;
@@ -233,8 +233,8 @@ struct {
printf("No key corrupted during multiwriter insertion.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED)/
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed)/
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf(" cycles per insertion: %llu\n", cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 4997a01..1867376 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -45,14 +45,14 @@ struct {
struct rte_hash *h;
} tbl_rw_test_param;
-static uint64_t gcycles;
-static uint64_t ginsertions;
+static RTE_ATOMIC(uint64_t) gcycles;
+static RTE_ATOMIC(uint64_t) ginsertions;
-static uint64_t gread_cycles;
-static uint64_t gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
-static uint64_t greads;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrites;
static int
test_hash_readwrite_worker(__rte_unused void *arg)
@@ -110,8 +110,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gcycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&ginsertions, i - offset, rte_memory_order_relaxed);
for (; i < offset + tbl_rw_test_param.num_insert; i++)
tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
@@ -209,8 +209,8 @@ struct {
int worker_cnt = rte_lcore_count() - 1;
uint32_t tot_insert = 0;
- __atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gcycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&ginsertions, 0, rte_memory_order_relaxed);
if (init_params(use_ext, use_htm, use_rw_lf, use_jhash) != 0)
goto err;
@@ -269,8 +269,8 @@ struct {
printf("No key corrupted during read-write test.\n");
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gcycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gcycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&ginsertions, rte_memory_order_relaxed);
printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
@@ -310,8 +310,8 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, i, rte_memory_order_relaxed);
return 0;
}
@@ -344,9 +344,9 @@ struct {
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rw_test_param.num_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rw_test_param.num_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -369,11 +369,11 @@ struct {
uint64_t start = 0, end = 0;
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
if (init_params(0, use_htm, 0, use_jhash) != 0)
goto err;
@@ -430,10 +430,10 @@ struct {
if (tot_worker_lcore < core_cnt[n] * 2)
goto finish;
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -475,8 +475,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_only[n] = cycles_per_insertion;
printf("Reader only: cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -484,17 +484,17 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->write_only[n] = cycles_per_insertion;
printf("Writer only: cycles per writes: %llu\n",
cycles_per_insertion);
}
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rw_test_param.h);
@@ -569,8 +569,8 @@ struct {
if (reader_faster) {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
perf_results->read_write_r[n] = cycles_per_insertion;
printf("Read-write cycles per lookup: %llu\n",
cycles_per_insertion);
@@ -578,8 +578,8 @@ struct {
else {
unsigned long long int cycles_per_insertion =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
perf_results->read_write_w[n] = cycles_per_insertion;
printf("Read-write cycles per writes: %llu\n",
cycles_per_insertion);
diff --git a/app/test/test_hash_readwrite_lf_perf.c b/app/test/test_hash_readwrite_lf_perf.c
index 5d18850..4523985 100644
--- a/app/test/test_hash_readwrite_lf_perf.c
+++ b/app/test/test_hash_readwrite_lf_perf.c
@@ -86,10 +86,10 @@ struct rwc_perf {
struct rte_hash *h;
} tbl_rwc_test_param;
-static uint64_t gread_cycles;
-static uint64_t greads;
-static uint64_t gwrite_cycles;
-static uint64_t gwrites;
+static RTE_ATOMIC(uint64_t) gread_cycles;
+static RTE_ATOMIC(uint64_t) greads;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
+static RTE_ATOMIC(uint64_t) gwrites;
static volatile uint8_t writer_done;
@@ -651,8 +651,8 @@ struct rwc_perf {
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&greads, read_cnt*loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gread_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&greads, read_cnt*loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -724,8 +724,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -742,8 +742,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_hit[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -791,8 +791,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -811,8 +811,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_no_ks_r_miss[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -861,8 +861,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -884,8 +884,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_nsp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -935,8 +935,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -958,8 +958,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_sp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1007,8 +1007,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1030,8 +1030,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
}
@@ -1087,9 +1087,9 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n",
rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0,
+ rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
@@ -1127,10 +1127,10 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles,
- __ATOMIC_RELAXED) /
- __atomic_load_n(&greads,
- __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles,
+ rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&greads,
+ rte_memory_order_relaxed);
rwc_perf_results->multi_rw[m][k][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n",
@@ -1178,8 +1178,8 @@ struct rwc_perf {
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&greads, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gread_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
write_type = WRITE_NO_KEY_SHIFT;
@@ -1210,8 +1210,8 @@ struct rwc_perf {
goto err;
unsigned long long cycles_per_lookup =
- __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
- / __atomic_load_n(&greads, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gread_cycles, rte_memory_order_relaxed)
+ / rte_atomic_load_explicit(&greads, rte_memory_order_relaxed);
rwc_perf_results->w_ks_r_hit_extbkt[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
@@ -1280,9 +1280,9 @@ struct rwc_perf {
tbl_rwc_test_param.keys_no_ks + i);
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,
- __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&gwrites, tbl_rwc_test_param.single_insert,
+ rte_memory_order_relaxed);
return 0;
}
@@ -1328,8 +1328,8 @@ struct rwc_perf {
rwc_core_cnt[n];
printf("\nNumber of writers: %u\n", rwc_core_cnt[n]);
- __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrites, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
rte_hash_reset(tbl_rwc_test_param.h);
rte_rcu_qsbr_init(rv, RTE_MAX_LCORE);
@@ -1364,8 +1364,8 @@ struct rwc_perf {
rte_eal_mp_wait_lcore();
unsigned long long cycles_per_write_operation =
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&gwrites, rte_memory_order_relaxed);
rwc_perf_results->writer_add_del[n]
= cycles_per_write_operation;
printf("Cycles per write operation: %llu\n",
diff --git a/app/test/test_lcores.c b/app/test/test_lcores.c
index 3434a0d..bd5c0dd 100644
--- a/app/test/test_lcores.c
+++ b/app/test/test_lcores.c
@@ -10,6 +10,7 @@
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_thread.h>
+#include <rte_stdatomic.h>
#include "test.h"
@@ -25,7 +26,7 @@ struct thread_context {
enum { Thread_INIT, Thread_ERROR, Thread_DONE } state;
bool lcore_id_any;
rte_thread_t id;
- unsigned int *registered_count;
+ RTE_ATOMIC(unsigned int) *registered_count;
};
static uint32_t thread_loop(void *arg)
@@ -49,10 +50,10 @@ static uint32_t thread_loop(void *arg)
t->state = Thread_ERROR;
}
/* Report register happened to the control thread. */
- __atomic_fetch_add(t->registered_count, 1, __ATOMIC_RELEASE);
+ rte_atomic_fetch_add_explicit(t->registered_count, 1, rte_memory_order_release);
/* Wait for release from the control thread. */
- while (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)
+ while (rte_atomic_load_explicit(t->registered_count, rte_memory_order_acquire) != 0)
sched_yield();
rte_thread_unregister();
lcore_id = rte_lcore_id();
@@ -73,7 +74,7 @@ static uint32_t thread_loop(void *arg)
{
struct thread_context thread_contexts[RTE_MAX_LCORE];
unsigned int non_eal_threads_count;
- unsigned int registered_count;
+ RTE_ATOMIC(unsigned int) registered_count;
struct thread_context *t;
unsigned int i;
int ret;
@@ -93,7 +94,7 @@ static uint32_t thread_loop(void *arg)
}
printf("non-EAL threads count: %u\n", non_eal_threads_count);
/* Wait all non-EAL threads to register. */
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
@@ -109,14 +110,14 @@ static uint32_t thread_loop(void *arg)
if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
non_eal_threads_count++;
printf("non-EAL threads count: %u\n", non_eal_threads_count);
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
}
skip_lcore_any:
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -225,7 +226,7 @@ struct limit_lcore_context {
struct thread_context thread_contexts[2];
unsigned int non_eal_threads_count = 0;
struct limit_lcore_context l[2] = {};
- unsigned int registered_count = 0;
+ RTE_ATOMIC(unsigned int) registered_count = 0;
struct thread_context *t;
void *handle[2] = {};
unsigned int i;
@@ -275,7 +276,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 1 ||
@@ -298,7 +299,7 @@ struct limit_lcore_context {
if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
goto cleanup_threads;
non_eal_threads_count++;
- while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
+ while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
non_eal_threads_count)
sched_yield();
if (l[0].init != eal_threads_count + 2 ||
@@ -315,7 +316,7 @@ struct limit_lcore_context {
}
rte_lcore_dump(stdout);
/* Release all threads, and check their states. */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
ret = 0;
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
@@ -337,7 +338,7 @@ struct limit_lcore_context {
cleanup_threads:
/* Release all threads */
- __atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
for (i = 0; i < non_eal_threads_count; i++) {
t = &thread_contexts[i];
rte_thread_join(t->id, NULL);
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
index 82daf9e..bc4bdde 100644
--- a/app/test/test_lpm_perf.c
+++ b/app/test/test_lpm_perf.c
@@ -22,8 +22,8 @@
struct rte_lpm *lpm;
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
-static volatile uint32_t thr_id;
-static uint64_t gwrite_cycles;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
+static RTE_ATOMIC(uint64_t) gwrite_cycles;
static uint32_t num_writers;
/* LPM APIs are not thread safe, use spinlock */
@@ -362,7 +362,7 @@ static void generate_large_route_rule_table(void)
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -470,7 +470,7 @@ static void generate_large_route_rule_table(void)
total_cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&gwrite_cycles, total_cycles, rte_memory_order_relaxed);
return 0;
@@ -540,9 +540,9 @@ static void generate_large_route_rule_table(void)
reader_f = test_lpm_reader;
writer_done = 0;
- __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&gwrite_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
/* Launch reader threads */
for (i = j; i < num_cores; i++)
@@ -563,7 +563,7 @@ static void generate_large_route_rule_table(void)
printf("Total LPM Adds: %d\n", TOTAL_WRITES);
printf("Total LPM Deletes: %d\n", TOTAL_WRITES);
printf("Average LPM Add/Del: %"PRIu64" cycles\n",
- __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
+ rte_atomic_load_explicit(&gwrite_cycles, rte_memory_order_relaxed)
/ TOTAL_WRITES);
writer_done = 1;
diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index 46ff13c..8fcbc11 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -42,7 +42,7 @@
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_mcslock_per_core(__rte_unused void *arg)
@@ -75,7 +75,7 @@
rte_mcslock_t ml_perf_me;
/* wait synchro */
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -100,14 +100,14 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
memset(time_count, 0, sizeof(time_count));
printf("\nTest with lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lock = 1;
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
@@ -116,11 +116,11 @@
printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index a42a772..130d598 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -88,7 +88,7 @@
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
/* number of objects in one bulk operation (get or put) */
static unsigned n_get_bulk;
@@ -188,7 +188,8 @@ struct __rte_cache_aligned mempool_test_stats {
/* wait synchro for workers */
if (lcore_id != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
start_cycles = rte_get_timer_cycles();
@@ -233,7 +234,7 @@ struct __rte_cache_aligned mempool_test_stats {
int ret;
unsigned cores_save = cores;
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
/* reset stats */
memset(stats, 0, sizeof(stats));
@@ -258,7 +259,7 @@ struct __rte_cache_aligned mempool_test_stats {
}
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
ret = per_lcore_mempool_test(mp);
diff --git a/app/test/test_pflock.c b/app/test/test_pflock.c
index 5f77b15..d989a68 100644
--- a/app/test/test_pflock.c
+++ b/app/test/test_pflock.c
@@ -31,7 +31,7 @@
static rte_pflock_t sl;
static rte_pflock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_pflock_per_core(__rte_unused void *arg)
@@ -69,7 +69,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -99,7 +100,7 @@
const unsigned int lcore = rte_lcore_id();
printf("\nTest with no lock on single core...\n");
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -107,7 +108,7 @@
printf("\nTest with phase-fair lock on single core...\n");
lock = 1;
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
printf("Core [%u] Cost Time = %"PRIu64" us\n",
lcore, time_count[lcore]);
@@ -116,12 +117,12 @@
printf("\nPhase-fair test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 35fa068..995b0a6 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -537,7 +537,7 @@ enum {
return 0;
}
-static uint64_t start;
+static RTE_ATOMIC(uint64_t) start;
static inline int
poll_burst(void *args)
@@ -575,7 +575,7 @@ enum {
num[portid] = pkt_per_port;
}
- rte_wait_until_equal_64(&start, 1, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_64((uint64_t *)(uintptr_t)&start, 1, rte_memory_order_acquire);
cur_tsc = rte_rdtsc();
while (total) {
@@ -629,9 +629,9 @@ enum {
/* only when polling first */
if (flags == SC_BURST_POLL_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_relaxed);
else
- __atomic_store_n(&start, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&start, 0, rte_memory_order_relaxed);
/* start polling thread
* if in POLL_FIRST mode, poll once launched;
@@ -655,7 +655,7 @@ enum {
/* only when polling second */
if (flags == SC_BURST_XMIT_FIRST)
- __atomic_store_n(&start, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&start, 1, rte_memory_order_release);
/* wait for polling finished */
diff_tsc = rte_eal_wait_lcore(lcore);
diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index ce88a73..d1bf5c5 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -25,13 +25,15 @@
static uint32_t *hash_data[TOTAL_ENTRY];
static volatile uint8_t writer_done;
static volatile uint8_t all_registered;
-static volatile uint32_t thr_id;
+static volatile RTE_ATOMIC(uint32_t) thr_id;
static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
static struct rte_hash *h;
static char hash_name[8];
-static uint64_t updates, checks;
-static uint64_t update_cycles, check_cycles;
+static RTE_ATOMIC(uint64_t) updates;
+static RTE_ATOMIC(uint64_t) checks;
+static RTE_ATOMIC(uint64_t) update_cycles;
+static RTE_ATOMIC(uint64_t) check_cycles;
/* Scale down results to 1000 operations to support lower
* granularity clocks.
@@ -44,7 +46,7 @@
{
uint32_t tmp_thr_id;
- tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
+ tmp_thr_id = rte_atomic_fetch_add_explicit(&thr_id, 1, rte_memory_order_relaxed);
if (tmp_thr_id >= RTE_MAX_LCORE)
printf("Invalid thread id %u\n", tmp_thr_id);
@@ -81,8 +83,8 @@
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
/* Make the thread offline */
rte_rcu_qsbr_thread_offline(t[0], thread_id);
@@ -113,8 +115,8 @@
} while (loop_cnt < 20000000);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, loop_cnt, rte_memory_order_relaxed);
return 0;
}
@@ -130,15 +132,15 @@
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
printf("\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\n",
num_cores - 1);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
if (all_registered == 1)
tmp_num_cores = num_cores - 1;
@@ -168,15 +170,16 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -193,10 +196,10 @@
size_t sz;
unsigned int i, tmp_num_cores;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf Test: %d Readers\n", num_cores);
@@ -220,11 +223,11 @@
rte_eal_mp_wait_lcore();
printf("Total quiescent state updates = %"PRIi64"\n",
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per %d quiescent state updates: %"PRIi64"\n",
RCU_SCALE_DOWN,
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&updates, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&updates, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -241,10 +244,10 @@
size_t sz;
unsigned int i;
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: %d Writers ('wait' in qsbr_check == false)\n",
num_cores);
@@ -266,10 +269,11 @@
/* Wait until all readers have exited */
rte_eal_mp_wait_lcore();
- printf("Total RCU checks = %"PRIi64"\n", __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ printf("Total RCU checks = %"PRIi64"\n", rte_atomic_load_explicit(&checks,
+ rte_memory_order_relaxed));
printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- (__atomic_load_n(&checks, __ATOMIC_RELAXED) / RCU_SCALE_DOWN));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ (rte_atomic_load_explicit(&checks, rte_memory_order_relaxed) / RCU_SCALE_DOWN));
rte_free(t[0]);
@@ -317,8 +321,8 @@
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&update_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&updates, loop_cnt, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&update_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&updates, loop_cnt, rte_memory_order_relaxed);
rte_rcu_qsbr_thread_unregister(temp, thread_id);
@@ -389,12 +393,12 @@ static struct rte_hash *init_hash(void)
writer_done = 0;
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
- __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_seq_cst);
printf("\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\n", num_cores);
@@ -453,8 +457,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
@@ -467,12 +471,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -511,7 +515,7 @@ static struct rte_hash *init_hash(void)
printf("Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\n", num_cores);
- __atomic_store_n(&thr_id, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&thr_id, 0, rte_memory_order_relaxed);
if (all_registered == 1)
tmp_num_cores = num_cores;
@@ -570,8 +574,8 @@ static struct rte_hash *init_hash(void)
}
cycles = rte_rdtsc_precise() - begin;
- __atomic_fetch_add(&check_cycles, cycles, __ATOMIC_RELAXED);
- __atomic_fetch_add(&checks, i, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&check_cycles, cycles, rte_memory_order_relaxed);
+ rte_atomic_fetch_add_explicit(&checks, i, rte_memory_order_relaxed);
writer_done = 1;
/* Wait and check return value from reader threads */
@@ -583,12 +587,12 @@ static struct rte_hash *init_hash(void)
printf("Following numbers include calls to rte_hash functions\n");
printf("Cycles per 1 quiescent state update(online/update/offline): %"PRIi64"\n",
- __atomic_load_n(&update_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&updates, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&update_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&updates, rte_memory_order_relaxed));
printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
- __atomic_load_n(&check_cycles, __ATOMIC_RELAXED) /
- __atomic_load_n(&checks, __ATOMIC_RELAXED));
+ rte_atomic_load_explicit(&check_cycles, rte_memory_order_relaxed) /
+ rte_atomic_load_explicit(&checks, rte_memory_order_relaxed));
rte_free(t[0]);
@@ -622,10 +626,10 @@ static struct rte_hash *init_hash(void)
return TEST_SKIPPED;
}
- __atomic_store_n(&updates, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&update_cycles, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&checks, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&check_cycles, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&updates, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&update_cycles, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&checks, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&check_cycles, 0, rte_memory_order_relaxed);
num_cores = 0;
RTE_LCORE_FOREACH_WORKER(core_id) {
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index d7c5a4c..6d7a0a8 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -186,7 +186,7 @@ struct thread_params {
void *burst = NULL;
#ifdef RTE_USE_C11_MEM_MODEL
- if (__atomic_fetch_add(&lcore_count, 1, __ATOMIC_RELAXED) + 1 != 2)
+ if (rte_atomic_fetch_add_explicit(&lcore_count, 1, rte_memory_order_relaxed) + 1 != 2)
#else
if (__sync_add_and_fetch(&lcore_count, 1) != 2)
#endif
@@ -320,7 +320,7 @@ struct thread_params {
return 0;
}
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static uint64_t queue_count[RTE_MAX_LCORE];
#define TIME_MS 100
@@ -342,7 +342,8 @@ struct thread_params {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (time_diff < hz * TIME_MS / 1000) {
@@ -397,12 +398,12 @@ struct thread_params {
param.r = r;
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
lcore_f(&param);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_ring_stress_impl.h b/app/test/test_ring_stress_impl.h
index 202d47d..8b0bfb1 100644
--- a/app/test/test_ring_stress_impl.h
+++ b/app/test/test_ring_stress_impl.h
@@ -24,7 +24,7 @@ enum {
WRK_CMD_RUN,
};
-static alignas(RTE_CACHE_LINE_SIZE) uint32_t wrk_cmd = WRK_CMD_STOP;
+static alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t) wrk_cmd = WRK_CMD_STOP;
/* test run-time in seconds */
static const uint32_t run_time = 60;
@@ -203,7 +203,7 @@ struct __rte_cache_aligned ring_elem {
* really releasing any data through 'wrk_cmd' to
* the worker.
*/
- while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) != WRK_CMD_RUN)
+ while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) != WRK_CMD_RUN)
rte_pause();
cl = rte_rdtsc_precise();
@@ -246,7 +246,7 @@ struct __rte_cache_aligned ring_elem {
lcore_stat_update(&la->stats, 1, num, tm0 + tm1, prcs);
- } while (__atomic_load_n(&wrk_cmd, __ATOMIC_RELAXED) == WRK_CMD_RUN);
+ } while (rte_atomic_load_explicit(&wrk_cmd, rte_memory_order_relaxed) == WRK_CMD_RUN);
cl = rte_rdtsc_precise() - cl;
if (prcs == 0)
@@ -360,12 +360,12 @@ struct __rte_cache_aligned ring_elem {
}
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_RUN, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_RUN, rte_memory_order_release);
rte_delay_us(run_time * US_PER_S);
/* signal worker to start test */
- __atomic_store_n(&wrk_cmd, WRK_CMD_STOP, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&wrk_cmd, WRK_CMD_STOP, rte_memory_order_release);
/* wait for workers and collect stats. */
mc = rte_lcore_id();
diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index e8767da..6777b91 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -35,7 +35,7 @@
static rte_rwlock_t sl;
static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
enum {
LC_TYPE_RDLOCK,
@@ -101,7 +101,8 @@ struct __rte_cache_aligned try_rwlock_lcore {
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcount < MAX_LOOP) {
@@ -134,12 +135,12 @@ struct __rte_cache_aligned try_rwlock_lcore {
printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
/* clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(NULL);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_seqlock.c b/app/test/test_seqlock.c
index bab8b0f..c5e5e64 100644
--- a/app/test/test_seqlock.c
+++ b/app/test/test_seqlock.c
@@ -22,7 +22,7 @@ struct __rte_cache_aligned data {
struct reader {
struct data *data;
- uint8_t stop;
+ RTE_ATOMIC(uint8_t) stop;
};
#define WRITER_RUNTIME 2.0 /* s */
@@ -79,7 +79,7 @@ struct reader {
struct reader *r = arg;
int rc = TEST_SUCCESS;
- while (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&
+ while (rte_atomic_load_explicit(&r->stop, rte_memory_order_relaxed) == 0 &&
rc == TEST_SUCCESS) {
struct data *data = r->data;
bool interrupted;
@@ -115,7 +115,7 @@ struct reader {
static void
reader_stop(struct reader *reader)
{
- __atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&reader->stop, 1, rte_memory_order_relaxed);
}
#define NUM_WRITERS 2 /* main lcore + one worker */
diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index c12d52d..010ab82 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -59,15 +59,15 @@ static int32_t dummy_mt_unsafe_cb(void *args)
* test, because two threads are concurrently in a non-MT safe callback.
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so clear pass flag */
*pass_test = 0;
@@ -86,15 +86,15 @@ static int32_t dummy_mt_safe_cb(void *args)
* that 2 threads are running the callback at the same time: MT safe
*/
uint32_t *test_params = args;
- uint32_t *lock = &test_params[0];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&test_params[0];
uint32_t *pass_test = &test_params[1];
uint32_t exp = 0;
- int lock_taken = __atomic_compare_exchange_n(lock, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ int lock_taken = rte_atomic_compare_exchange_strong_explicit(lock, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed);
if (lock_taken) {
/* delay with the lock held */
rte_delay_ms(250);
- __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(lock, 0, rte_memory_order_relaxed);
} else {
/* 2nd thread will fail to take lock, so set pass flag */
*pass_test = 1;
@@ -748,15 +748,15 @@ static int32_t dummy_mt_safe_cb(void *args)
/* retrieve done flag and lock to add/sub */
uint32_t *done = &params[0];
- uint32_t *lock = &params[1];
+ RTE_ATOMIC(uint32_t) *lock = (uint32_t __rte_atomic *)&params[1];
while (!*done) {
- __atomic_fetch_add(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(lock, 1, rte_memory_order_relaxed);
rte_delay_us(500);
- if (__atomic_load_n(lock, __ATOMIC_RELAXED) > 1)
+ if (rte_atomic_load_explicit(lock, rte_memory_order_relaxed) > 1)
/* pass: second core has simultaneously incremented */
*done = 1;
- __atomic_fetch_sub(lock, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(lock, 1, rte_memory_order_relaxed);
}
return 0;
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 9a481f2..a29405a 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -48,7 +48,7 @@
static rte_spinlock_recursive_t slr;
static unsigned count = 0;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_spinlock_per_core(__rte_unused void *arg)
@@ -110,7 +110,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_get_timer_cycles();
while (lcount < MAX_LOOP) {
@@ -149,11 +150,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c
index c5e1caa..3f17a26 100644
--- a/app/test/test_stack_perf.c
+++ b/app/test/test_stack_perf.c
@@ -23,7 +23,7 @@
*/
static volatile unsigned int bulk_sizes[] = {8, MAX_BURST};
-static uint32_t lcore_barrier;
+static RTE_ATOMIC(uint32_t) lcore_barrier;
struct lcore_pair {
unsigned int c1;
@@ -143,8 +143,8 @@ struct thread_args {
s = args->s;
size = args->sz;
- __atomic_fetch_sub(&lcore_barrier, 1, __ATOMIC_RELAXED);
- rte_wait_until_equal_32(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(&lcore_barrier, 1, rte_memory_order_relaxed);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&lcore_barrier, 0, rte_memory_order_relaxed);
uint64_t start = rte_rdtsc();
@@ -173,7 +173,7 @@ struct thread_args {
unsigned int i;
for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
- __atomic_store_n(&lcore_barrier, 2, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 2, rte_memory_order_relaxed);
args[0].sz = args[1].sz = bulk_sizes[i];
args[0].s = args[1].s = s;
@@ -206,7 +206,7 @@ struct thread_args {
int cnt = 0;
double avg;
- __atomic_store_n(&lcore_barrier, n, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, n, rte_memory_order_relaxed);
RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
@@ -300,7 +300,7 @@ struct thread_args {
struct lcore_pair cores;
struct rte_stack *s;
- __atomic_store_n(&lcore_barrier, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_barrier, 0, rte_memory_order_relaxed);
s = rte_stack_create(STACK_NAME, STACK_SIZE, rte_socket_id(), flags);
if (s == NULL) {
diff --git a/app/test/test_threads.c b/app/test/test_threads.c
index 4ac3f26..6d6881a 100644
--- a/app/test/test_threads.c
+++ b/app/test/test_threads.c
@@ -6,12 +6,13 @@
#include <rte_thread.h>
#include <rte_debug.h>
+#include <rte_stdatomic.h>
#include "test.h"
RTE_LOG_REGISTER(threads_logtype_test, test.threads, INFO);
-static uint32_t thread_id_ready;
+static RTE_ATOMIC(uint32_t) thread_id_ready;
static uint32_t
thread_main(void *arg)
@@ -19,9 +20,9 @@
if (arg != NULL)
*(rte_thread_t *)arg = rte_thread_self();
- __atomic_store_n(&thread_id_ready, 1, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 1, rte_memory_order_release);
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 1)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 1)
;
return 0;
@@ -37,13 +38,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
@@ -61,13 +62,13 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main,
&thread_main_id) == 0, "Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_detach(thread_id) == 0,
"Failed to detach thread.");
@@ -85,7 +86,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
priority = RTE_THREAD_PRIORITY_NORMAL;
@@ -121,7 +122,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Priority set mismatches priority get");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -137,7 +138,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, NULL, thread_main, NULL) == 0,
"Failed to create thread");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset0) == 0,
@@ -190,7 +191,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes affinity thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_affinity_by_id(thread_id, &cpuset1) == 0,
@@ -198,7 +199,7 @@
RTE_TEST_ASSERT(memcmp(&cpuset0, &cpuset1, sizeof(rte_cpuset_t)) == 0,
"Failed to apply affinity attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -219,7 +220,7 @@
RTE_TEST_ASSERT(rte_thread_create(&thread_id, &attr, thread_main, NULL) == 0,
"Failed to create attributes priority thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_get_priority(thread_id, &priority) == 0,
@@ -227,7 +228,7 @@
RTE_TEST_ASSERT(priority == RTE_THREAD_PRIORITY_NORMAL,
"Failed to apply priority attributes");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
return 0;
}
@@ -243,13 +244,13 @@
thread_main, &thread_main_id) == 0,
"Failed to create thread.");
- while (__atomic_load_n(&thread_id_ready, __ATOMIC_ACQUIRE) == 0)
+ while (rte_atomic_load_explicit(&thread_id_ready, rte_memory_order_acquire) == 0)
;
RTE_TEST_ASSERT(rte_thread_equal(thread_id, thread_main_id) != 0,
"Unexpected thread id.");
- __atomic_store_n(&thread_id_ready, 2, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&thread_id_ready, 2, rte_memory_order_release);
RTE_TEST_ASSERT(rte_thread_join(thread_id, NULL) == 0,
"Failed to join thread.");
diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 7a6cb4c..ad4a2d8 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -48,7 +48,7 @@
static rte_ticketlock_recursive_t tlr;
static unsigned int count;
-static uint32_t synchro;
+static RTE_ATOMIC(uint32_t) synchro;
static int
test_ticketlock_per_core(__rte_unused void *arg)
@@ -111,7 +111,8 @@
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
- rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+ rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
+ rte_memory_order_relaxed);
begin = rte_rdtsc_precise();
while (lcore_count[lcore] < MAX_LOOP) {
@@ -153,11 +154,11 @@
printf("\nTest with lock on %u cores...\n", rte_lcore_count());
/* Clear synchro and start workers */
- __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
/* start synchro and launch test on main */
- __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
load_loop_fn(&lock);
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index cac8fc0..dc15a80 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -202,7 +202,7 @@ struct mytimerinfo {
/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
-static uint16_t lcore_state[RTE_MAX_LCORE];
+static RTE_ATOMIC(uint16_t) lcore_state[RTE_MAX_LCORE];
static void
main_init_workers(void)
@@ -210,7 +210,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_WAITING, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_WAITING,
+ rte_memory_order_relaxed);
}
}
@@ -220,10 +221,12 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- __atomic_store_n(&lcore_state[i], WORKER_RUN_SIGNAL, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[i], WORKER_RUN_SIGNAL,
+ rte_memory_order_release);
}
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_RUNNING, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_RUNNING,
+ rte_memory_order_acquire);
}
}
@@ -233,7 +236,8 @@ struct mytimerinfo {
unsigned i;
RTE_LCORE_FOREACH_WORKER(i) {
- rte_wait_until_equal_16(&lcore_state[i], WORKER_FINISHED, __ATOMIC_ACQUIRE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[i], WORKER_FINISHED,
+ rte_memory_order_acquire);
}
}
@@ -242,8 +246,10 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- rte_wait_until_equal_16(&lcore_state[lcore_id], WORKER_RUN_SIGNAL, __ATOMIC_ACQUIRE);
- __atomic_store_n(&lcore_state[lcore_id], WORKER_RUNNING, __ATOMIC_RELEASE);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&lcore_state[lcore_id], WORKER_RUN_SIGNAL,
+ rte_memory_order_acquire);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_RUNNING,
+ rte_memory_order_release);
}
static void
@@ -251,7 +257,8 @@ struct mytimerinfo {
{
unsigned lcore_id = rte_lcore_id();
- __atomic_store_n(&lcore_state[lcore_id], WORKER_FINISHED, __ATOMIC_RELEASE);
+ rte_atomic_store_explicit(&lcore_state[lcore_id], WORKER_FINISHED,
+ rte_memory_order_release);
}
@@ -277,12 +284,12 @@ struct mytimerinfo {
unsigned int lcore_id = rte_lcore_id();
unsigned int main_lcore = rte_get_main_lcore();
int32_t my_collisions = 0;
- static uint32_t collisions;
+ static RTE_ATOMIC(uint32_t) collisions;
if (lcore_id == main_lcore) {
cb_count = 0;
test_failed = 0;
- __atomic_store_n(&collisions, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&collisions, 0, rte_memory_order_relaxed);
timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
if (timers == NULL) {
printf("Test Failed\n");
@@ -310,7 +317,7 @@ struct mytimerinfo {
my_collisions++;
}
if (my_collisions != 0)
- __atomic_fetch_add(&collisions, my_collisions, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&collisions, my_collisions, rte_memory_order_relaxed);
/* wait long enough for timers to expire */
rte_delay_ms(100);
@@ -324,7 +331,7 @@ struct mytimerinfo {
/* now check that we get the right number of callbacks */
if (lcore_id == main_lcore) {
- my_collisions = __atomic_load_n(&collisions, __ATOMIC_RELAXED);
+ my_collisions = rte_atomic_load_explicit(&collisions, rte_memory_order_relaxed);
if (my_collisions != 0)
printf("- %d timer reset collisions (OK)\n", my_collisions);
rte_timer_manage();
--
1.8.3.1
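The app/test hunks above all follow one shape: the shared variable becomes RTE_ATOMIC(T), each __atomic_* builtin becomes its rte_atomic_*_explicit counterpart, and the __ATOMIC_* orders become rte_memory_order_*. As a reference, here is a minimal standalone sketch of the try-lock used by the MT-safe callback test, written only with the API visible in these hunks; the demo_* names are illustrative and not part of the patch.
/* Illustrative sketch only; demo_* names are made up, API as in the hunks above. */
#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_cycles.h>
static RTE_ATOMIC(uint32_t) demo_lock;
static int
demo_cb(void *arg)
{
	uint32_t *pass_test = arg;
	uint32_t exp = 0;
	/* strong CAS: only one concurrent caller moves the lock 0 -> 1 */
	if (rte_atomic_compare_exchange_strong_explicit(&demo_lock, &exp, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed)) {
		rte_delay_ms(250); /* hold the lock while a peer tries to take it */
		rte_atomic_store_explicit(&demo_lock, 0, rte_memory_order_relaxed);
	} else {
		/* a second caller raced us while the lock was held */
		*pass_test = 1;
	}
	return 0;
}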
* [PATCH v6 42/45] app/test-eventdev: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (40 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 41/45] app/test: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 43/45] app/test-crypto-perf: " Tyler Retzlaff
` (3 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-eventdev/test_order_atq.c | 4 ++--
app/test-eventdev/test_order_common.c | 5 +++--
app/test-eventdev/test_order_common.h | 8 ++++----
app/test-eventdev/test_order_queue.c | 4 ++--
app/test-eventdev/test_perf_common.h | 6 +++---
5 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 2fee4b4..128d3f2 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index a9894c6..0fceace 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -189,7 +189,7 @@
evt_err("failed to allocate t->expected_flow_seq memory");
goto exp_nomem;
}
- __atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&t->outstand_pkts, opt->nb_pkts, rte_memory_order_relaxed);
t->err = false;
t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
@@ -296,7 +296,8 @@
while (t->err == false) {
uint64_t new_cycles = rte_get_timer_cycles();
- int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
+ int64_t remaining = rte_atomic_load_explicit(&t->outstand_pkts,
+ rte_memory_order_relaxed);
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index d4cbc5c..7177fd8 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -48,7 +48,7 @@ struct __rte_cache_aligned test_order {
* The atomic_* is an expensive operation,Since it is a functional test,
* We are using the atomic_ operation to reduce the code complexity.
*/
- uint64_t outstand_pkts;
+ RTE_ATOMIC(uint64_t) outstand_pkts;
enum evt_test_result result;
uint32_t nb_flows;
uint64_t nb_pkts;
@@ -95,7 +95,7 @@ struct __rte_cache_aligned test_order {
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
- uint64_t *const outstand_pkts)
+ RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
/* compare the seqn against expected value */
@@ -113,7 +113,7 @@ struct __rte_cache_aligned test_order {
*/
expected_flow_seq[flow]++;
rte_pktmbuf_free(ev->mbuf);
- __atomic_fetch_sub(outstand_pkts, 1, __ATOMIC_RELAXED);
+ rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}
static __rte_always_inline void
@@ -132,7 +132,7 @@ struct __rte_cache_aligned test_order {
const uint8_t port = w->port_id;\
const uint32_t nb_flows = t->nb_flows;\
uint32_t *expected_flow_seq = t->expected_flow_seq;\
- uint64_t *outstand_pkts = &t->outstand_pkts;\
+ RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
if (opt->verbose_level > 1)\
printf("%s(): lcore %d dev_id %d port=%d\n",\
__func__, rte_lcore_id(), dev_id, port)
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 80eaea5..a282ab2 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -28,7 +28,7 @@
uint16_t event = rte_event_dequeue_burst(dev_id, port,
&ev, 1, 0);
if (!event) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
@@ -64,7 +64,7 @@
BURST_SIZE, 0);
if (nb_rx == 0) {
- if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
+ if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
break;
rte_pause();
continue;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index bc627de..d60b873 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -225,7 +225,7 @@ struct __rte_cache_aligned perf_elt {
* stored before updating the number of
* processed packets for worker lcores
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -270,7 +270,7 @@ struct __rte_cache_aligned perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts++;
if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
@@ -325,7 +325,7 @@ struct __rte_cache_aligned perf_elt {
/* Release fence here ensures event_prt is stored before updating the number of processed
* packets for worker lcores.
*/
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ rte_atomic_thread_fence(rte_memory_order_release);
w->processed_pkts += vec->nb_elem;
if (enable_fwd_latency) {
--
1.8.3.1
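The test-eventdev conversion keeps the same pattern for its in-flight packet counter: a relaxed store publishes the initial count, workers decrement it with rte_atomic_fetch_sub_explicit, and the dequeue loops poll a relaxed load until nothing is outstanding. A minimal sketch of that counter, assuming nothing beyond the calls used above (the demo_* names are illustrative):
/* Illustrative sketch only; demo_* names are made up. */
#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
static RTE_ATOMIC(uint64_t) demo_outstand_pkts;
/* producer: publish how many packets are in flight */
static void
demo_reset(uint64_t nb_pkts)
{
	rte_atomic_store_explicit(&demo_outstand_pkts, nb_pkts,
			rte_memory_order_relaxed);
}
/* worker: one packet fully processed */
static void
demo_complete_one(void)
{
	rte_atomic_fetch_sub_explicit(&demo_outstand_pkts, 1,
			rte_memory_order_relaxed);
}
/* main loop: true once nothing is outstanding */
static bool
demo_done(void)
{
	return (int64_t)rte_atomic_load_explicit(&demo_outstand_pkts,
			rte_memory_order_relaxed) <= 0;
}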
* [PATCH v6 43/45] app/test-crypto-perf: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (41 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 42/45] app/test-eventdev: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 44/45] app/test-compress-perf: " Tyler Retzlaff
` (2 subsequent siblings)
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-crypto-perf/cperf_test_latency.c | 6 +++---
app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 10 +++++-----
app/test-crypto-perf/cperf_test_throughput.c | 10 +++++-----
app/test-crypto-perf/cperf_test_verify.c | 10 +++++-----
4 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 99b7d7c..b8ad6bf 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -136,7 +136,7 @@ struct priv_op_data {
uint32_t imix_idx = 0;
int ret = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
if (ctx == NULL)
return 0;
@@ -341,8 +341,8 @@ struct priv_op_data {
uint16_t exp = 0;
if (ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
"cycles, time (us)");
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 4a60f6d..7191d99 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -396,7 +396,7 @@ struct pmd_cyclecount_state {
state.lcore = rte_lcore_id();
state.linearize = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
static bool warmup = true;
/*
@@ -443,8 +443,8 @@ struct pmd_cyclecount_state {
uint16_t exp = 0;
if (!opts->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
@@ -460,8 +460,8 @@ struct pmd_cyclecount_state {
state.cycles_per_enq,
state.cycles_per_deq);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index e3d266d..c0891e7 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -107,7 +107,7 @@ struct cperf_throughput_ctx {
uint8_t burst_size_idx = 0;
uint32_t imix_idx = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
@@ -277,8 +277,8 @@ struct cperf_throughput_ctx {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst Size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -298,8 +298,8 @@ struct cperf_throughput_ctx {
throughput_gbps,
cycles_per_packet);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 3548509..222c7a1 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -216,7 +216,7 @@ struct cperf_op_result {
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
uint64_t ops_failed = 0;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint64_t i;
uint16_t ops_unused = 0;
@@ -370,8 +370,8 @@ struct cperf_op_result {
uint16_t exp = 0;
if (!ctx->options->csv) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst size",
"Enqueued", "Dequeued", "Failed Enq",
@@ -388,8 +388,8 @@ struct cperf_op_result {
ops_deqd_failed,
ops_failed);
} else {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed))
printf("\n# lcore id, Buffer Size(B), "
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Failed Ops\n");
--
1.8.3.1
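The crypto-perf test files all share the "print the header exactly once" idiom that this patch converts: a static RTE_ATOMIC(uint16_t) flag and a relaxed strong CAS from 0 to 1, so only the first lcore through wins and prints. A minimal sketch built from the calls shown above (names and the header string are illustrative, not from the patch):
/* Illustrative sketch only. */
#include <stdint.h>
#include <stdio.h>
#include <rte_stdatomic.h>
/* print the header exactly once, no matter how many lcores call this */
static void
demo_print_header_once(void)
{
	static RTE_ATOMIC(uint16_t) display_once;
	uint16_t exp = 0;
	if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed))
		printf("# lcore id, Buffer Size, Burst Size\n");
}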
* [PATCH v6 44/45] app/test-compress-perf: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (42 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 43/45] app/test-crypto-perf: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-14 16:35 ` [PATCH v6 45/45] app/test-bbdev: " Tyler Retzlaff
2024-05-17 16:58 ` [PATCH v6 00/45] use " David Marchand
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-compress-perf/comp_perf_test_common.h | 2 +-
app/test-compress-perf/comp_perf_test_cyclecount.c | 4 ++--
app/test-compress-perf/comp_perf_test_throughput.c | 10 +++++-----
app/test-compress-perf/comp_perf_test_verify.c | 6 +++---
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/app/test-compress-perf/comp_perf_test_common.h b/app/test-compress-perf/comp_perf_test_common.h
index d039e5a..085e269 100644
--- a/app/test-compress-perf/comp_perf_test_common.h
+++ b/app/test-compress-perf/comp_perf_test_common.h
@@ -14,7 +14,7 @@ struct cperf_mem_resources {
uint16_t qp_id;
uint8_t lcore_id;
- uint16_t print_info_once;
+ RTE_ATOMIC(uint16_t) print_info_once;
uint32_t total_bufs;
uint8_t *compressed_data;
diff --git a/app/test-compress-perf/comp_perf_test_cyclecount.c b/app/test-compress-perf/comp_perf_test_cyclecount.c
index 4d336ec..64e8faa 100644
--- a/app/test-compress-perf/comp_perf_test_cyclecount.c
+++ b/app/test-compress-perf/comp_perf_test_cyclecount.c
@@ -498,8 +498,8 @@ struct cperf_cyclecount_ctx {
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
diff --git a/app/test-compress-perf/comp_perf_test_throughput.c b/app/test-compress-perf/comp_perf_test_throughput.c
index 1f7072d..089d19c 100644
--- a/app/test-compress-perf/comp_perf_test_throughput.c
+++ b/app/test-compress-perf/comp_perf_test_throughput.c
@@ -336,7 +336,7 @@
struct cperf_benchmark_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->ver.options;
uint32_t lcore = rte_lcore_id();
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
int i, ret = EXIT_SUCCESS;
ctx->ver.mem.lcore_id = lcore;
@@ -345,8 +345,8 @@
/*
* printing information about current compression thread
*/
- if (__atomic_compare_exchange_n(&ctx->ver.mem.print_info_once, &exp,
- 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ if (rte_atomic_compare_exchange_strong_explicit(&ctx->ver.mem.print_info_once, &exp,
+ 1, rte_memory_order_relaxed, rte_memory_order_relaxed))
printf(" lcore: %u,"
" driver name: %s,"
" device name: %s,"
@@ -413,8 +413,8 @@
}
exp = 0;
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("\n%12s%6s%12s%17s%15s%16s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]",
"Comp [Gbps]", "Decomp [Gbps]");
diff --git a/app/test-compress-perf/comp_perf_test_verify.c b/app/test-compress-perf/comp_perf_test_verify.c
index 7bd1807..09d97c5 100644
--- a/app/test-compress-perf/comp_perf_test_verify.c
+++ b/app/test-compress-perf/comp_perf_test_verify.c
@@ -396,7 +396,7 @@
struct cperf_verify_ctx *ctx = test_ctx;
struct comp_test_data *test_data = ctx->options;
int ret = EXIT_SUCCESS;
- static uint16_t display_once;
+ static RTE_ATOMIC(uint16_t) display_once;
uint32_t lcore = rte_lcore_id();
uint16_t exp = 0;
@@ -452,8 +452,8 @@
test_data->input_data_sz * 100;
if (!ctx->silent) {
- if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+ if (rte_atomic_compare_exchange_strong_explicit(&display_once, &exp, 1,
+ rte_memory_order_relaxed, rte_memory_order_relaxed)) {
printf("%12s%6s%12s%17s\n",
"lcore id", "Level", "Comp size", "Comp ratio [%]");
}
--
1.8.3.1
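One detail visible in the compare-and-swap conversions above: the gcc builtin takes a weak/strong flag as its fourth argument, while the rte API names the strong variant explicitly, so that argument simply disappears. A small sketch of the mapping, using only the two forms that appear in the hunks (the demo_* name and placeholder values are illustrative):
/* Illustrative sketch only; demo_* names are made up. */
#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>
/*
 * The removed lines above used the builtin form, where the fourth
 * argument selects a weak (non-0) or strong (0) CAS:
 *
 *   __atomic_compare_exchange_n(ptr, &exp, des, 0,
 *           __ATOMIC_RELAXED, __ATOMIC_RELAXED);
 *
 * The replacement names the strong variant, so the flag is dropped.
 */
static bool
demo_flag_test_and_set(RTE_ATOMIC(uint16_t) *ptr)
{
	uint16_t exp = 0;
	const uint16_t des = 1;
	return rte_atomic_compare_exchange_strong_explicit(ptr, &exp, des,
			rte_memory_order_relaxed, rte_memory_order_relaxed);
}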
* [PATCH v6 45/45] app/test-bbdev: use rte stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (43 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 44/45] app/test-compress-perf: " Tyler Retzlaff
@ 2024-05-14 16:35 ` Tyler Retzlaff
2024-05-17 16:58 ` [PATCH v6 00/45] use " David Marchand
45 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-14 16:35 UTC (permalink / raw)
To: dev
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer,
Ajit Khaparde, Alok Prasad, Anatoly Burakov, Andrew Rybchenko,
Anoob Joseph, Bruce Richardson, Byron Marohn, Chenbo Xia,
Chengwen Feng, Ciara Loftus, Ciara Power, Dariusz Sosnowski,
David Hunt, Devendra Singh Rawat, Erik Gabriel Carrillo,
Guoyang Zhou, Harman Kalra, Harry van Haaren,
Honnappa Nagarahalli, Jakub Grajciar, Jerin Jacob,
Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai, Jingjing Wu,
Joshua Washington, Joyce Kong, Junfeng Guo, Kevin Laatz,
Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger,
Suanming Mou, Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa,
Vamsi Attunuru, Viacheslav Ovsiienko, Vladimir Medvedkin,
Xiaoyun Wang, Yipeng Wang, Yisen Zhuang, Ziyang Xuan,
Tyler Retzlaff
Replace the use of gcc builtin __atomic_xxx intrinsics with
corresponding rte_atomic_xxx optional rte stdatomic API.
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
---
app/test-bbdev/test_bbdev_perf.c | 183 +++++++++++++++++++++++----------------
1 file changed, 110 insertions(+), 73 deletions(-)
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index dcce00a..9694ed3 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -144,7 +144,7 @@ struct test_op_params {
uint16_t num_to_process;
uint16_t num_lcores;
int vector_mask;
- uint16_t sync;
+ RTE_ATOMIC(uint16_t) sync;
struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
@@ -159,9 +159,9 @@ struct thread_params {
uint8_t iter_count;
double iter_average;
double bler;
- uint16_t nb_dequeued;
- int16_t processing_status;
- uint16_t burst_sz;
+ RTE_ATOMIC(uint16_t) nb_dequeued;
+ RTE_ATOMIC(int16_t) processing_status;
+ RTE_ATOMIC(uint16_t) burst_sz;
struct test_op_params *op_params;
struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
@@ -3195,56 +3195,64 @@ typedef int (test_case_function)(struct active_device *ad,
}
if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
printf(
"Dequeue interrupt handler called for incorrect event!\n");
return;
}
- burst_sz = __atomic_load_n(&tp->burst_sz, __ATOMIC_RELAXED);
+ burst_sz = rte_atomic_load_explicit(&tp->burst_sz, rte_memory_order_relaxed);
num_ops = tp->op_params->num_to_process;
if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
&tp->dec_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
deq = rte_bbdev_dequeue_fft_ops(dev_id, queue_id,
&tp->fft_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
deq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
&tp->mldts_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
else /*RTE_BBDEV_OP_TURBO_ENC*/
deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&tp->enc_ops[
- __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+ rte_atomic_load_explicit(&tp->nb_dequeued,
+ rte_memory_order_relaxed)],
burst_sz);
if (deq < burst_sz) {
printf(
"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
burst_sz, deq);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
- if (__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) + deq < num_ops) {
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ if (rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) + deq < num_ops) {
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
return;
}
@@ -3288,7 +3296,8 @@ typedef int (test_case_function)(struct active_device *ad,
if (ret) {
printf("Buffers validation failed\n");
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
}
switch (test_vector.op_type) {
@@ -3315,7 +3324,8 @@ typedef int (test_case_function)(struct active_device *ad,
break;
default:
printf("Unknown op type: %d\n", test_vector.op_type);
- __atomic_store_n(&tp->processing_status, TEST_FAILED, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, TEST_FAILED,
+ rte_memory_order_relaxed);
return;
}
@@ -3324,7 +3334,7 @@ typedef int (test_case_function)(struct active_device *ad,
tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
((double)total_time / (double)rte_get_tsc_hz());
- __atomic_fetch_add(&tp->nb_dequeued, deq, __ATOMIC_RELAXED);
+ rte_atomic_fetch_add_explicit(&tp->nb_dequeued, deq, rte_memory_order_relaxed);
}
static int
@@ -3362,10 +3372,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3415,15 +3426,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3459,10 +3472,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3506,15 +3520,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3549,10 +3565,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3592,15 +3609,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3636,10 +3655,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3681,15 +3701,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3725,10 +3747,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops,
num_to_process);
@@ -3769,15 +3792,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3811,10 +3836,11 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->processing_status, 0, rte_memory_order_relaxed);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_to_process);
@@ -3851,15 +3877,17 @@ typedef int (test_case_function)(struct active_device *ad,
* the number of operations is not a multiple of
* burst size.
*/
- __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
+ rte_memory_order_relaxed);
/* Wait until processing of previous batch is
* completed
*/
- rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->nb_dequeued, enqueued,
+ rte_memory_order_relaxed);
}
if (j != TEST_REPETITIONS - 1)
- __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tp->nb_dequeued, 0, rte_memory_order_relaxed);
}
return TEST_SUCCESS;
@@ -3894,7 +3922,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4013,7 +4042,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4148,7 +4178,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4271,7 +4302,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4402,7 +4434,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4503,7 +4536,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
num_ops);
@@ -4604,7 +4638,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_fft_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4702,7 +4737,8 @@ typedef int (test_case_function)(struct active_device *ad,
bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
- rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_wait_until_equal_16((uint16_t *)(uintptr_t)&tp->op_params->sync, SYNC_START,
+ rte_memory_order_relaxed);
ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
@@ -4898,7 +4934,7 @@ typedef int (test_case_function)(struct active_device *ad,
else
return TEST_SKIPPED;
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -4921,7 +4957,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = bler_function(&t_params[0]);
/* Main core is always used */
@@ -5024,7 +5060,7 @@ typedef int (test_case_function)(struct active_device *ad,
throughput_function = throughput_pmd_lcore_enc;
}
- __atomic_store_n(&op_params->sync, SYNC_WAIT, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_WAIT, rte_memory_order_relaxed);
/* Main core is set at first entry */
t_params[0].dev_id = ad->dev_id;
@@ -5047,7 +5083,7 @@ typedef int (test_case_function)(struct active_device *ad,
&t_params[used_cores++], lcore_id);
}
- __atomic_store_n(&op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&op_params->sync, SYNC_START, rte_memory_order_relaxed);
ret = throughput_function(&t_params[0]);
/* Main core is always used */
@@ -5077,29 +5113,30 @@ typedef int (test_case_function)(struct active_device *ad,
* Wait for main lcore operations.
*/
tp = &t_params[0];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
+ (rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed) !=
TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status, rte_memory_order_relaxed);
/* Wait for worker lcores operations */
for (used_cores = 1; used_cores < num_lcores; used_cores++) {
tp = &t_params[used_cores];
- while ((__atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED) <
+ while ((rte_atomic_load_explicit(&tp->nb_dequeued, rte_memory_order_relaxed) <
op_params->num_to_process) &&
- (__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED) !=
- TEST_FAILED))
+ (rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed) != TEST_FAILED))
rte_pause();
tp->ops_per_sec /= TEST_REPETITIONS;
tp->mbps /= TEST_REPETITIONS;
- ret |= (int)__atomic_load_n(&tp->processing_status, __ATOMIC_RELAXED);
+ ret |= (int)rte_atomic_load_explicit(&tp->processing_status,
+ rte_memory_order_relaxed);
}
/* Print throughput if test passed */
--
1.8.3.1
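The bbdev patch also shows the one spot where a cast remains after conversion: rte_wait_until_equal_16() takes a plain uint16_t pointer, so the RTE_ATOMIC(uint16_t) fields are passed through (uint16_t *)(uintptr_t). A minimal sketch of the start gate built from that pair of calls, assuming only the API used above (the DEMO_*/demo_* names and values are illustrative):
/* Illustrative sketch only; DEMO_*/demo_* names and values are made up. */
#include <stdint.h>
#include <rte_stdatomic.h>
#include <rte_pause.h>
#define DEMO_SYNC_WAIT  0
#define DEMO_SYNC_START 1
static RTE_ATOMIC(uint16_t) demo_sync;
/* main lcore: arm the gate before launching workers */
static void
demo_arm(void)
{
	rte_atomic_store_explicit(&demo_sync, DEMO_SYNC_WAIT,
			rte_memory_order_relaxed);
}
/* main lcore: release all waiting workers */
static void
demo_release_workers(void)
{
	rte_atomic_store_explicit(&demo_sync, DEMO_SYNC_START,
			rte_memory_order_relaxed);
}
/* worker lcore: block until the main lcore opens the gate */
static void
demo_wait_for_start(void)
{
	/* the wait helper takes a plain pointer, hence the cast */
	rte_wait_until_equal_16((uint16_t *)(uintptr_t)&demo_sync,
			DEMO_SYNC_START, rte_memory_order_relaxed);
}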
* Re: [PATCH v6 00/45] use stdatomic API
2024-05-14 16:35 ` [PATCH v6 " Tyler Retzlaff
` (44 preceding siblings ...)
2024-05-14 16:35 ` [PATCH v6 45/45] app/test-bbdev: " Tyler Retzlaff
@ 2024-05-17 16:58 ` David Marchand
2024-05-17 20:04 ` Tyler Retzlaff
45 siblings, 1 reply; 300+ messages in thread
From: David Marchand @ 2024-05-17 16:58 UTC (permalink / raw)
To: Tyler Retzlaff
Cc: dev, Mattias Rönnblom, Morten Brørup,
Abdullah Sevincer, Ajit Khaparde, Alok Prasad, Anatoly Burakov,
Andrew Rybchenko, Anoob Joseph, Bruce Richardson, Byron Marohn,
Chenbo Xia, Chengwen Feng, Ciara Loftus, Ciara Power,
Dariusz Sosnowski, David Hunt, Devendra Singh Rawat,
Erik Gabriel Carrillo, Guoyang Zhou, Harman Kalra,
Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li,
Maciej Czekaj, Matan Azrad, Maxime Coquelin, Nicolas Chautru,
Ori Kam, Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy,
Reshma Pattan, Rosen Xu, Ruifeng Wang, Rushil Gupta,
Sameh Gobriel, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang,
Yipeng Wang, Yisen Zhuang, Ziyang Xuan
On Tue, May 14, 2024 at 6:36 PM Tyler Retzlaff
<roretzla@linux.microsoft.com> wrote:
>
> This series converts all non-generic builtin atomics to use the rte_atomic
> macros that allow optional enablement of standard C11 atomics.
>
> Use of generic atomics for non-scalar types are not converted in this
> change and will be evaluated as a part of a separate series.
> Specifically conversion of lib/lpm and drivers/x/cnxk will be addressed
> in a separate series to address use of generics.
I adjusted some more 4 space indents (trying to preserve the beauty of
the pre-existing indent when possible), and applied this series.
Thanks Tyler.
--
David Marchand
* Re: [PATCH v6 00/45] use stdatomic API
2024-05-17 16:58 ` [PATCH v6 00/45] use " David Marchand
@ 2024-05-17 20:04 ` Tyler Retzlaff
0 siblings, 0 replies; 300+ messages in thread
From: Tyler Retzlaff @ 2024-05-17 20:04 UTC (permalink / raw)
To: David Marchand
Cc: dev, Mattias Rönnblom, Morten Brørup,
Abdullah Sevincer, Ajit Khaparde, Alok Prasad, Anatoly Burakov,
Andrew Rybchenko, Anoob Joseph, Bruce Richardson, Byron Marohn,
Chenbo Xia, Chengwen Feng, Ciara Loftus, Ciara Power,
Dariusz Sosnowski, David Hunt, Devendra Singh Rawat,
Erik Gabriel Carrillo, Guoyang Zhou, Harman Kalra,
Harry van Haaren, Honnappa Nagarahalli, Jakub Grajciar,
Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu, Jie Hai,
Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li,
Maciej Czekaj, Matan Azrad, Maxime Coquelin, Nicolas Chautru,
Ori Kam, Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy,
Reshma Pattan, Rosen Xu, Ruifeng Wang, Rushil Gupta,
Sameh Gobriel, Sivaprasad Tummala, Somnath Kotur,
Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang,
Yipeng Wang, Yisen Zhuang, Ziyang Xuan
On Fri, May 17, 2024 at 06:58:57PM +0200, David Marchand wrote:
> On Tue, May 14, 2024 at 6:36 PM Tyler Retzlaff
> <roretzla@linux.microsoft.com> wrote:
> >
> > This series converts all non-generic builtin atomics to use the rte_atomic
> > macros that allow optional enablement of standard C11 atomics.
> >
> > Use of generic atomics for non-scalar types are not converted in this
> > change and will be evaluated as a part of a separate series.
> > Specifically conversion of lib/lpm and drivers/x/cnxk will be addressed
> > in a separate series to address use of generics.
>
> I adjusted some more 4 space indents (trying to preserve the beauty of
> the pre-existing indent when possible), and applied this series.
> Thanks Tyler.
Appreciate it David!
>
>
> --
> David Marchand
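The cover letter quoted above describes the goal as rte_atomic macros that allow optional enablement of standard C11 atomics. As a rough illustration of that layering only (this is not the actual rte_stdatomic.h, and the DEMO_* names are made up), a single spelling in the code can expand either to C11 <stdatomic.h> or to the gcc builtins depending on a build-time switch:
/*
 * Illustration only -- not the real rte_stdatomic.h. One spelling in
 * the code, two possible expansions chosen at build time.
 */
#include <stdint.h>
#ifdef DEMO_ENABLE_STDATOMIC
#include <stdatomic.h>
#define DEMO_ATOMIC(type)     _Atomic(type)
#define demo_load_relaxed(p)  atomic_load_explicit((p), memory_order_relaxed)
#else
#define DEMO_ATOMIC(type)     type
#define demo_load_relaxed(p)  __atomic_load_n((p), __ATOMIC_RELAXED)
#endif
static DEMO_ATOMIC(uint32_t) demo_counter;
static uint32_t
demo_read(void)
{
	return demo_load_relaxed(&demo_counter);
}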