From mboxrd@z Thu Jan 1 00:00:00 1970
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Akhil Goyal, Anatoly Burakov, Andrew Rybchenko, Bruce Richardson,
	Chenbo Xia, Ciara Power, David Christensen, David Hunt,
	Dmitry Kozlyuk, Dmitry Malloy, Elena Agostini,
	Erik Gabriel Carrillo, Fan Zhang, Ferruh Yigit, Harman Kalra,
	Harry van Haaren, Honnappa Nagarahalli, Jerin Jacob,
	Konstantin Ananyev, Matan Azrad, Maxime Coquelin,
	Narcisa Ana Maria Vasile, Nicolas Chautru, Olivier Matz,
	Ori Kam, Pallavi Kadam, Pavan Nikhilesh, Reshma Pattan,
	Sameh Gobriel, Shijith Thotton, Sivaprasad Tummala,
	Stephen Hemminger, Suanming Mou, Sunil Kumar Kori,
	Thomas Monjalon, Viacheslav Ovsiienko, Vladimir Medvedkin,
	Yipeng Wang, Tyler Retzlaff
Subject: [PATCH 06/21] eventdev: use rte optional stdatomic API
Date: Mon, 16 Oct 2023 16:08:50 -0700
Message-Id: <1697497745-20664-7-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>
References: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>
List-Id: DPDK patches and discussions <dev.dpdk.org>

Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/event/cnxk/cnxk_tim_worker.h   |  4 +--
 lib/eventdev/rte_event_timer_adapter.c | 66 +++++++++++++++++-----------------
 lib/eventdev/rte_event_timer_adapter.h |  2 +-
 3 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/drivers/event/cnxk/cnxk_tim_worker.h b/drivers/event/cnxk/cnxk_tim_worker.h
index f0857f2..f530d8c 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.h
+++ b/drivers/event/cnxk/cnxk_tim_worker.h
@@ -314,7 +314,7 @@
 	tim->impl_opaque[0] = (uintptr_t)chunk;
 	tim->impl_opaque[1] = (uintptr_t)bkt;
 
-	__atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&tim->state, RTE_EVENT_TIMER_ARMED, rte_memory_order_release);
 
 	cnxk_tim_bkt_inc_nent(bkt);
 	cnxk_tim_bkt_dec_lock_relaxed(bkt);
@@ -425,7 +425,7 @@
 	tim->impl_opaque[0] = (uintptr_t)chunk;
 	tim->impl_opaque[1] = (uintptr_t)bkt;
 
-	__atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&tim->state, RTE_EVENT_TIMER_ARMED, rte_memory_order_release);
 
 	cnxk_tim_bkt_inc_nent(bkt);
 	cnxk_tim_bkt_dec_lock_relaxed(bkt);

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index 427c4c6..2746670 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -630,12 +630,12 @@ struct swtim {
 	uint32_t timer_data_id;
 	/* Track which cores have actually armed a timer */
 	struct {
-		uint16_t v;
+		RTE_ATOMIC(uint16_t) v;
 	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
 	/* Track which cores' timer lists should be polled */
-	unsigned int poll_lcores[RTE_MAX_LCORE];
+	RTE_ATOMIC(unsigned int) poll_lcores[RTE_MAX_LCORE];
 	/* The number of lists that should be polled */
-	int n_poll_lcores;
+	RTE_ATOMIC(int) n_poll_lcores;
 	/* Timers which have expired and can be returned to a mempool */
 	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
 	/* The number of timers that can be returned to a mempool */
@@ -669,10 +669,10 @@ struct swtim {
 
 	if (unlikely(sw->in_use[lcore].v == 0)) {
 		sw->in_use[lcore].v = 1;
-		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
-					     __ATOMIC_RELAXED);
-		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
-				__ATOMIC_RELAXED);
+		n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
+				rte_memory_order_relaxed);
+		rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore,
+				rte_memory_order_relaxed);
 	}
 
 	ret = event_buffer_add(&sw->buffer, &evtim->ev);
@@ -719,8 +719,8 @@ struct swtim {
 		sw->stats.evtim_exp_count++;
 
 		if (type == SINGLE)
-			__atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
-					__ATOMIC_RELEASE);
+			rte_atomic_store_explicit(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
+					rte_memory_order_release);
 	}
 
 	if (event_buffer_batch_ready(&sw->buffer)) {
@@ -846,7 +846,7 @@ struct swtim {
 
 	if (swtim_did_tick(sw)) {
 		rte_timer_alt_manage(sw->timer_data_id,
-				     sw->poll_lcores,
+				     (unsigned int *)(uintptr_t)sw->poll_lcores,
 				     sw->n_poll_lcores,
 				     swtim_callback);
 
@@ -1027,7 +1027,7 @@ struct swtim {
 
 	/* Free outstanding timers */
 	rte_timer_stop_all(sw->timer_data_id,
-			   sw->poll_lcores,
+			   (unsigned int *)(uintptr_t)sw->poll_lcores,
 			   sw->n_poll_lcores,
 			   swtim_free_tim,
 			   sw);
@@ -1142,7 +1142,7 @@ struct swtim {
 	uint64_t cur_cycles;
 
 	/* Check that timer is armed */
-	n_state = __atomic_load_n(&evtim->state, __ATOMIC_ACQUIRE);
+	n_state = rte_atomic_load_explicit(&evtim->state, rte_memory_order_acquire);
 	if (n_state != RTE_EVENT_TIMER_ARMED)
 		return -EINVAL;
 
@@ -1201,15 +1201,15 @@ struct swtim {
 	 * The atomic compare-and-swap operation can prevent the race condition
 	 * on in_use flag between multiple non-EAL threads.
 	 */
-	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
-			&exp_state, 1, 0,
-			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
+	if (unlikely(rte_atomic_compare_exchange_strong_explicit(&sw->in_use[lcore_id].v,
+			&exp_state, 1,
+			rte_memory_order_relaxed, rte_memory_order_relaxed))) {
 		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
 			      lcore_id);
-		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
-					     __ATOMIC_RELAXED);
-		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
-				__ATOMIC_RELAXED);
+		n_lcores = rte_atomic_fetch_add_explicit(&sw->n_poll_lcores, 1,
+				rte_memory_order_relaxed);
+		rte_atomic_store_explicit(&sw->poll_lcores[n_lcores], lcore_id,
+				rte_memory_order_relaxed);
 	}
 
 	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
@@ -1223,7 +1223,7 @@ struct swtim {
 	type = get_timer_type(adapter);
 
 	for (i = 0; i < nb_evtims; i++) {
-		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
+		n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
 		if (n_state == RTE_EVENT_TIMER_ARMED) {
 			rte_errno = EALREADY;
 			break;
@@ -1235,9 +1235,9 @@ struct swtim {
 
 		if (unlikely(check_destination_event_queue(evtims[i],
 							   adapter) < 0)) {
-			__atomic_store_n(&evtims[i]->state,
+			rte_atomic_store_explicit(&evtims[i]->state,
 					RTE_EVENT_TIMER_ERROR,
-					__ATOMIC_RELAXED);
+					rte_memory_order_relaxed);
 			rte_errno = EINVAL;
 			break;
 		}
@@ -1250,15 +1250,15 @@ struct swtim {
 
 		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
 		if (unlikely(ret == -1)) {
-			__atomic_store_n(&evtims[i]->state,
+			rte_atomic_store_explicit(&evtims[i]->state,
 					RTE_EVENT_TIMER_ERROR_TOOLATE,
-					__ATOMIC_RELAXED);
+					rte_memory_order_relaxed);
 			rte_errno = EINVAL;
 			break;
 		} else if (unlikely(ret == -2)) {
-			__atomic_store_n(&evtims[i]->state,
+			rte_atomic_store_explicit(&evtims[i]->state,
 					RTE_EVENT_TIMER_ERROR_TOOEARLY,
-					__ATOMIC_RELAXED);
+					rte_memory_order_relaxed);
 			rte_errno = EINVAL;
 			break;
 		}
@@ -1267,9 +1267,9 @@ struct swtim {
 					  type, lcore_id, NULL, evtims[i]);
 		if (ret < 0) {
 			/* tim was in RUNNING or CONFIG state */
-			__atomic_store_n(&evtims[i]->state,
+			rte_atomic_store_explicit(&evtims[i]->state,
 					RTE_EVENT_TIMER_ERROR,
-					__ATOMIC_RELEASE);
+					rte_memory_order_release);
 			break;
 		}
 
@@ -1277,8 +1277,8 @@ struct swtim {
 		/* RELEASE ordering guarantees the adapter specific value
 		 * changes observed before the update of state.
 		 */
-		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
-				__ATOMIC_RELEASE);
+		rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
+				rte_memory_order_release);
 	}
 
 	if (i < nb_evtims)
@@ -1320,7 +1320,7 @@ struct swtim {
 		/* ACQUIRE ordering guarantees the access of implementation
 		 * specific opaque data under the correct state.
 		 */
-		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
+		n_state = rte_atomic_load_explicit(&evtims[i]->state, rte_memory_order_acquire);
 		if (n_state == RTE_EVENT_TIMER_CANCELED) {
 			rte_errno = EALREADY;
 			break;
@@ -1346,8 +1346,8 @@ struct swtim {
 		 * to make sure the state update data observed between
 		 * threads.
 		 */
-		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
-				__ATOMIC_RELEASE);
+		rte_atomic_store_explicit(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
+				rte_memory_order_release);
 	}
 
 	return i;

diff --git a/lib/eventdev/rte_event_timer_adapter.h b/lib/eventdev/rte_event_timer_adapter.h
index fbdddf8..49e646a 100644
--- a/lib/eventdev/rte_event_timer_adapter.h
+++ b/lib/eventdev/rte_event_timer_adapter.h
@@ -498,7 +498,7 @@ struct rte_event_timer {
 	 * implementation specific values to share between the arm and cancel
 	 * operations. The application should not modify this field.
 	 */
-	enum rte_event_timer_state state;
+	RTE_ATOMIC(enum rte_event_timer_state) state;
 	/**< State of the event timer. */
 	uint8_t user_meta[];
 	/**< Memory to store user specific metadata.
-- 
1.8.3.1
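
For reviewers less familiar with the new API, the conversion pattern is
mechanical. Below is a minimal sketch of the before/after mapping used
throughout this patch; it is illustrative only (the variable names are
invented), and it assumes a tree where <rte_stdatomic.h> supplies the
rte_atomic_xxx wrappers, which map to C11 stdatomic when the optional
enable_stdatomic build option is set and to the gcc __atomic builtins
otherwise.

#include <stdint.h>
#include <rte_stdatomic.h>

/* Objects accessed atomically now carry the RTE_ATOMIC() specifier so
 * that the code type-checks under both backends.
 */
static RTE_ATOMIC(uint16_t) in_use;
static RTE_ATOMIC(int) n_poll;

static void
atomics_sketch(void)
{
	uint16_t exp = 0;

	/* was: __atomic_store_n(&in_use, 1, __ATOMIC_RELEASE); */
	rte_atomic_store_explicit(&in_use, 1, rte_memory_order_release);

	/* was: __atomic_load_n(&in_use, __ATOMIC_ACQUIRE); */
	(void)rte_atomic_load_explicit(&in_use, rte_memory_order_acquire);

	/* was: __atomic_fetch_add(&n_poll, 1, __ATOMIC_RELAXED); */
	(void)rte_atomic_fetch_add_explicit(&n_poll, 1, rte_memory_order_relaxed);

	/* was: __atomic_compare_exchange_n(&in_use, &exp, 1, 0,
	 *                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	 * note that the weak/strong flag moves into the function name.
	 */
	(void)rte_atomic_compare_exchange_strong_explicit(&in_use, &exp, 1,
			rte_memory_order_relaxed, rte_memory_order_relaxed);
}

The memory orders carry over one-for-one (__ATOMIC_RELEASE becomes
rte_memory_order_release, and so on), so the synchronization semantics
of the converted code are unchanged.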