From mboxrd@z Thu Jan 1 00:00:00 1970
From: Phil Yang <phil.yang@arm.com>
To: thomas@monjalon.net, harry.van.haaren@intel.com,
 konstantin.ananyev@intel.com, stephen@networkplumber.org,
 maxime.coquelin@redhat.com, dev@dpdk.org
Cc: david.marchand@redhat.com, jerinj@marvell.com, hemant.agrawal@nxp.com,
 Honnappa.Nagarahalli@arm.com, gavin.hu@arm.com, ruifeng.wang@arm.com,
 joyce.kong@arm.com, nd@arm.com
Date: Thu, 12 Mar 2020 15:44:31 +0800
Message-Id: <1583999071-22872-11-git-send-email-phil.yang@arm.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1583999071-22872-1-git-send-email-phil.yang@arm.com>
References: <1583862551-2049-1-git-send-email-phil.yang@arm.com>
 <1583999071-22872-1-git-send-email-phil.yang@arm.com>
Subject: [dpdk-dev] [PATCH v2 10/10] service: relax barriers with C11 atomic
 operations

The service library uses rte_smp_rmb()/rte_smp_wmb() barriers extensively
to guarantee inter-thread visibility of shared state. This patch relaxes
these barriers by replacing them with C11 atomic one-way barrier
operations (store-release and load-acquire).
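For illustration, here is a minimal standalone sketch (not part of this
patch) of the one-way barrier pattern the series applies: a store-release
on the writer side synchronizes with a load-acquire on the reader side,
replacing a full rte_smp_wmb()/rte_smp_rmb() pair. All names in the sketch
(shared_data, ready, writer, reader) are invented for the example.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t shared_data;	/* plays the role of e.g. s->comp_runstate */
static uint32_t ready;		/* plays the role of the guard variable */

static void *writer(void *arg)
{
	(void)arg;
	shared_data = 42;
	/* store-release: all prior stores are visible to any thread that
	 * observes ready == 1 with an acquire load; no full barrier needed.
	 */
	__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* load-acquire: once ready reads 1, the writer's earlier store to
	 * shared_data is guaranteed to be visible here.
	 */
	while (__atomic_load_n(&ready, __ATOMIC_ACQUIRE) == 0)
		;
	printf("shared_data = %u\n", shared_data);	/* prints 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;
	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Compile with gcc -pthread. The one-way (acquire/release) ordering constrains
only the direction each side needs, which is cheaper than a full barrier on
weakly ordered architectures such as arm64.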
Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
 lib/librte_eal/common/rte_service.c | 45 ++++++++++++++++++++-----------------
 1 file changed, 25 insertions(+), 20 deletions(-)

diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index 96a59b6..9c02c24 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -176,9 +176,11 @@ rte_service_set_stats_enable(uint32_t id, int32_t enabled)
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
 
 	if (enabled)
-		s->internal_flags |= SERVICE_F_STATS_ENABLED;
+		__atomic_or_fetch(&s->internal_flags, SERVICE_F_STATS_ENABLED,
+			__ATOMIC_RELEASE);
 	else
-		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
+		__atomic_and_fetch(&s->internal_flags,
+			~(SERVICE_F_STATS_ENABLED), __ATOMIC_RELEASE);
 
 	return 0;
 }
@@ -190,9 +192,11 @@ rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
 
 	if (enabled)
-		s->internal_flags |= SERVICE_F_START_CHECK;
+		__atomic_or_fetch(&s->internal_flags, SERVICE_F_START_CHECK,
+			__ATOMIC_RELEASE);
 	else
-		s->internal_flags &= ~(SERVICE_F_START_CHECK);
+		__atomic_and_fetch(&s->internal_flags, ~(SERVICE_F_START_CHECK),
+			__ATOMIC_RELEASE);
 
 	return 0;
 }
@@ -261,8 +265,8 @@ rte_service_component_register(const struct rte_service_spec *spec,
 	s->spec = *spec;
 	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
 
-	rte_smp_wmb();
-	rte_service_count++;
+	/* make sure the counter update happens after the state change */
+	__atomic_add_fetch(&rte_service_count, 1, __ATOMIC_RELEASE);
 
 	if (id_ptr)
 		*id_ptr = free_slot;
@@ -278,9 +282,10 @@ rte_service_component_unregister(uint32_t id)
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 
 	rte_service_count--;
-	rte_smp_wmb();
 
-	s->internal_flags &= ~(SERVICE_F_REGISTERED);
+	/* make sure the counter update happens before the state change */
+	__atomic_and_fetch(&s->internal_flags, ~(SERVICE_F_REGISTERED),
+		__ATOMIC_RELEASE);
 
 	/* clear the run-bit in all cores */
 	for (i = 0; i < RTE_MAX_LCORE; i++)
@@ -298,11 +303,12 @@ rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 
 	if (runstate)
-		s->comp_runstate = RUNSTATE_RUNNING;
+		__atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
+			__ATOMIC_RELEASE);
 	else
-		s->comp_runstate = RUNSTATE_STOPPED;
+		__atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
+			__ATOMIC_RELEASE);
 
-	rte_smp_wmb();
 	return 0;
 }
@@ -313,11 +319,12 @@ rte_service_runstate_set(uint32_t id, uint32_t runstate)
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 
 	if (runstate)
-		s->app_runstate = RUNSTATE_RUNNING;
+		__atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
+			__ATOMIC_RELEASE);
 	else
-		s->app_runstate = RUNSTATE_STOPPED;
+		__atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
+			__ATOMIC_RELEASE);
 
-	rte_smp_wmb();
 	return 0;
 }
@@ -439,7 +446,8 @@ service_runner_func(void *arg)
 	const int lcore = rte_lcore_id();
 	struct core_state *cs = &lcore_states[lcore];
 
-	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
+	while (__atomic_load_n(&cs->runstate,
+			__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING) {
 		const uint64_t service_mask = cs->service_mask;
 
 		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
@@ -450,8 +458,6 @@ service_runner_func(void *arg)
 		}
 
 		cs->loops++;
-
-		rte_smp_rmb();
 	}
 
 	lcore_config[lcore].state = WAIT;
@@ -660,9 +666,8 @@ rte_service_lcore_add(uint32_t lcore)
 	/* ensure that after adding a core the mask and state are defaults */
 	lcore_states[lcore].service_mask = 0;
-	lcore_states[lcore].runstate = RUNSTATE_STOPPED;
-
-	rte_smp_wmb();
+	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
+		__ATOMIC_RELEASE);
 
 	return rte_eal_wait_lcore(lcore);
 }
-- 
2.7.4