DPDK patches and discussions
From: Asaf Penso <asafp@nvidia.com>
To: Eli Britstein <elibr@nvidia.com>, "dev@dpdk.org" <dev@dpdk.org>
Cc: Eli Britstein <elibr@nvidia.com>
Subject: Re: [dpdk-dev] [PATCH] eal: fix build with conflicting libc variable memory_order
Date: Wed, 14 Oct 2020 13:34:24 +0000
Message-ID: <DM5PR12MB240614D58ECDC154071873ECCD050@DM5PR12MB2406.namprd12.prod.outlook.com>
In-Reply-To: <20201014061909.9957-1-elibr@nvidia.com>

>-----Original Message-----
>From: dev <dev-bounces@dpdk.org> On Behalf Of Eli Britstein
>Sent: Wednesday, October 14, 2020 9:19 AM
>To: dev@dpdk.org
>Cc: Eli Britstein <elibr@nvidia.com>
>Subject: [dpdk-dev] [PATCH] eal: fix build with conflicting libc variable
>memory_order
>
>The cited commit introduced functions with an 'int memory_order'
>argument. The C11 standard, section 7.17.1.4, defines 'memory_order'
>as the "enumerated type whose enumerators identify memory ordering
>constraints".
>Applications that use the standard enum (by including stdatomic.h)
>will fail to compile with:
>
>error: declaration of 'memory_order' shadows a global declaration
>    [-Werror=shadow]
>     rte_atomic_thread_fence(int memory_order)
>
>Fix it by changing the argument name 'memory_order' to 'memorder'.
>
>Fixes: 672a15056380 ("eal: add wrapper for C11 atomic thread fence")
>Signed-off-by: Eli Britstein <elibr@nvidia.com>
Reviewed-by: Asaf Penso <asafp@nvidia.com>
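
For context, a minimal reproduction of the clash the commit message
describes (a sketch only; the file name and build line are illustrative
and not taken from the patch). C11's stdatomic.h declares 'memory_order'
as a type at file scope, so with some GCC versions a parameter of the
same name trips -Wshadow:

  /* shadow_repro.c -- build with: gcc -Wshadow -Werror -c shadow_repro.c */
  #include <stdatomic.h>  /* declares the C11 type 'memory_order' */

  /* Mirrors the pre-fix declaration: the parameter name shadows the
   * global type declaration, producing the -Werror=shadow failure. */
  static inline void my_thread_fence(int memory_order)
  {
          __atomic_thread_fence(memory_order);  /* GCC/Clang builtin */
  }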

>---
> lib/librte_eal/arm/include/rte_atomic_32.h  | 4 ++--
> lib/librte_eal/arm/include/rte_atomic_64.h  | 4 ++--
> lib/librte_eal/include/generic/rte_atomic.h | 2 +-
> lib/librte_eal/ppc/include/rte_atomic.h     | 4 ++--
> lib/librte_eal/x86/include/rte_atomic.h     | 6 +++---
> 5 files changed, 10 insertions(+), 10 deletions(-)
>
>diff --git a/lib/librte_eal/arm/include/rte_atomic_32.h b/lib/librte_eal/arm/include/rte_atomic_32.h
>index 9d0568d497..fe48ab428e 100644
>--- a/lib/librte_eal/arm/include/rte_atomic_32.h
>+++ b/lib/librte_eal/arm/include/rte_atomic_32.h
>@@ -34,9 +34,9 @@ extern "C" {
> #define rte_io_rmb() rte_rmb()
>
> static __rte_always_inline void
>-rte_atomic_thread_fence(int memory_order)
>+rte_atomic_thread_fence(int memorder)
> {
>-	__atomic_thread_fence(memory_order);
>+	__atomic_thread_fence(memorder);
> }
>
> #ifdef __cplusplus
>diff --git a/lib/librte_eal/arm/include/rte_atomic_64.h b/lib/librte_eal/arm/include/rte_atomic_64.h
>index c518559bc9..20dd6c75dd 100644
>--- a/lib/librte_eal/arm/include/rte_atomic_64.h
>+++ b/lib/librte_eal/arm/include/rte_atomic_64.h
>@@ -38,9 +38,9 @@ extern "C" {
> #define rte_io_rmb() rte_rmb()
>
> static __rte_always_inline void
>-rte_atomic_thread_fence(int memory_order)
>+rte_atomic_thread_fence(int memorder)
> {
>-	__atomic_thread_fence(memory_order);
>+	__atomic_thread_fence(memorder);
> }
>
> /*------------------------ 128 bit atomic operations -------------------------*/
>diff --git a/lib/librte_eal/include/generic/rte_atomic.h b/lib/librte_eal/include/generic/rte_atomic.h
>index d1255b2d8c..276272f40b 100644
>--- a/lib/librte_eal/include/generic/rte_atomic.h
>+++ b/lib/librte_eal/include/generic/rte_atomic.h
>@@ -122,7 +122,7 @@ static inline void rte_io_rmb(void);
> /**
>  * Synchronization fence between threads based on the specified memory order.
>  */
>-static inline void rte_atomic_thread_fence(int memory_order);
>+static inline void rte_atomic_thread_fence(int memorder);
>
> /*------------------------- 16 bit atomic operations -------------------------*/
>
>diff --git a/lib/librte_eal/ppc/include/rte_atomic.h b/lib/librte_eal/ppc/include/rte_atomic.h
>index a91989930b..6a7e65210c 100644
>--- a/lib/librte_eal/ppc/include/rte_atomic.h
>+++ b/lib/librte_eal/ppc/include/rte_atomic.h
>@@ -37,9 +37,9 @@ extern "C" {
> #define rte_io_rmb() rte_rmb()
>
> static __rte_always_inline void
>-rte_atomic_thread_fence(int memory_order)
>+rte_atomic_thread_fence(int memorder)
> {
>-	__atomic_thread_fence(memory_order);
>+	__atomic_thread_fence(memorder);
> }
>
> /*------------------------- 16 bit atomic operations -------------------------*/
>diff --git a/lib/librte_eal/x86/include/rte_atomic.h b/lib/librte_eal/x86/include/rte_atomic.h
>index b7d6b06ddf..915afd9d27 100644
>--- a/lib/librte_eal/x86/include/rte_atomic.h
>+++ b/lib/librte_eal/x86/include/rte_atomic.h
>@@ -87,12 +87,12 @@ rte_smp_mb(void)
>  * used instead.
>  */
> static __rte_always_inline void
>-rte_atomic_thread_fence(int memory_order)
>+rte_atomic_thread_fence(int memorder)
> {
>-	if (memory_order == __ATOMIC_SEQ_CST)
>+	if (memorder == __ATOMIC_SEQ_CST)
> 		rte_smp_mb();
> 	else
>-		__atomic_thread_fence(memory_order);
>+		__atomic_thread_fence(memorder);
> }
>
> /*------------------------- 16 bit atomic operations -------------------------*/
>--
>2.28.0.546.g385c171
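
After the rename, including stdatomic.h alongside the EAL header no
longer shadows anything. A hedged usage sketch (not from the thread;
assumes a DPDK build environment that provides rte_atomic.h): the
function still takes the GCC __ATOMIC_* constants, and on x86 the
__ATOMIC_SEQ_CST case is routed through rte_smp_mb() as the last hunk
above shows.

  #include <stdatomic.h>   /* now coexists with the EAL header */
  #include <rte_atomic.h>

  static void publish(volatile int *flag)
  {
          /* Release fence before signalling readers; on most arches this
           * maps directly to __atomic_thread_fence(__ATOMIC_RELEASE). */
          rte_atomic_thread_fence(__ATOMIC_RELEASE);
          *flag = 1;
  }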



Thread overview: 4+ messages
2020-10-14  6:19 Eli Britstein
2020-10-14 13:34 ` Asaf Penso [this message]
2020-10-14 23:16 ` Thomas Monjalon
2020-10-15  7:12   ` David Marchand
