Subject: Re: [PATCH v2 5/5] eal: extend bitops to handle volatile pointers
From: Jack Bond-Preston
To: Mattias Rönnblom, dev@dpdk.org
Cc: hofors@lysator.liu.se, Heng Wang, Stephen Hemminger, Joyce Kong,
 Tyler Retzlaff, Morten Brørup
Date: Mon, 12 Aug 2024 12:22:14 +0100
Message-ID: <0c46f8fd-c63b-4736-839f-ab787076109a@foss.arm.com>
In-Reply-To: <20240809095829.589396-6-mattias.ronnblom@ericsson.com>

On 09/08/2024 10:58, Mattias Rönnblom wrote:
> 
> +#define __RTE_GEN_BIT_ATOMIC_TEST(v, qualifier, size) \
>  	__rte_experimental \
>  	static inline bool \
> -	__rte_bit_atomic_test ## size(const uint ## size ## _t *addr, \
> -			unsigned int nr, int memory_order) \
> +	__rte_bit_atomic_ ## v ## test ## size(const qualifier uint ## size ## _t *addr, \
> +			unsigned int nr, int memory_order) \
>  	{ \
>  		RTE_ASSERT(nr < size); \
>  		\
> -		const RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> -			(const RTE_ATOMIC(uint ## size ## _t) *)addr; \
> +		const qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> +			(const qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>  		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>  		return rte_atomic_load_explicit(a_addr, memory_order) & mask; \
>  	}
> 
> -#define __RTE_GEN_BIT_ATOMIC_SET(size) \
> +#define __RTE_GEN_BIT_ATOMIC_SET(v, qualifier, size) \
>  	__rte_experimental \
>  	static inline void \
> -	__rte_bit_atomic_set ## size(uint ## size ## _t *addr, \
> -			unsigned int nr, int memory_order) \
> +	__rte_bit_atomic_ ## v ## set ## size(qualifier uint ## size ## _t *addr, \
> +			unsigned int nr, int memory_order) \
>  	{ \
>  		RTE_ASSERT(nr < size); \
>  		\
> -		RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> -			(RTE_ATOMIC(uint ## size ## _t) *)addr; \
> +		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> +			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>  		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>  		rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
>  	}
> 
> -#define __RTE_GEN_BIT_ATOMIC_CLEAR(size) \
> +#define __RTE_GEN_BIT_ATOMIC_CLEAR(v, qualifier, size) \
>  	__rte_experimental \
>  	static inline void \
> -	__rte_bit_atomic_clear ## size(uint ## size ## _t *addr, \
> -			unsigned int nr, int memory_order) \
> +	__rte_bit_atomic_ ## v ## clear ## size(qualifier uint ## size ## _t *addr, \
> +			unsigned int nr, int memory_order) \
>  	{ \
>  		RTE_ASSERT(nr < size); \
>  		\
> -		RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> -			(RTE_ATOMIC(uint ## size ## _t) *)addr; \
> +		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> +			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>  		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>  		rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
>  	}
> 
> -#define __RTE_GEN_BIT_ATOMIC_FLIP(size) \
> +#define __RTE_GEN_BIT_ATOMIC_FLIP(v, qualifier, size) \
>  	__rte_experimental \
>  	static inline void \
> -	__rte_bit_atomic_flip ## size(uint ## size ## _t *addr, \
> -			unsigned int nr, int memory_order) \
> +	__rte_bit_atomic_ ## v ## flip ## size(qualifier uint ## size ## _t *addr, \
> +			unsigned int nr, int memory_order) \
>  	{ \
>  		RTE_ASSERT(nr < size); \
>  		\
> -		RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> -			(RTE_ATOMIC(uint ## size ## _t) *)addr; \
> +		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> +			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>  		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>  		rte_atomic_fetch_xor_explicit(a_addr, mask, memory_order); \
>  	}
> 
> -#define __RTE_GEN_BIT_ATOMIC_ASSIGN(size) \
> +#define __RTE_GEN_BIT_ATOMIC_ASSIGN(v, qualifier, size) \
>  	__rte_experimental \
>  	static inline void \
> -	__rte_bit_atomic_assign ## size(uint ## size ## _t *addr, \
> -			unsigned int nr, bool value, \
> -			int memory_order) \
> +	__rte_bit_atomic_## v ## assign ## size(qualifier uint ## size ## _t *addr, \
> +			unsigned int nr, bool value, \
> +			int memory_order) \
>  	{ \
>  		if (value) \
> -			__rte_bit_atomic_set ## size(addr, nr, memory_order); \
> +			__rte_bit_atomic_ ## v ## set ## size(addr, nr, memory_order); \
>  		else \
> -			__rte_bit_atomic_clear ## size(addr, nr, \
> -					memory_order); \
> +			__rte_bit_atomic_ ## v ## clear ## size(addr, nr, \
> +					memory_order); \
>  	}
> 
> -#define __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(size) \
> +#define __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(v, qualifier, size) \
>  	__rte_experimental \
>  	static inline bool \
> -	__rte_bit_atomic_test_and_set ## size(uint ## size ## _t *addr, \
> -			unsigned int nr, \
> -			int memory_order) \
> +	__rte_bit_atomic_ ## v ## test_and_set ## size(qualifier uint ## size ## _t *addr, \
> +			unsigned int nr, \
> +			int memory_order) \
>  	{ \
>  		RTE_ASSERT(nr < size); \
>  		\
> -		RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> -			(RTE_ATOMIC(uint ## size ## _t) *)addr; \
> +		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> +			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>  		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>  		uint ## size ## _t prev; \
>  		\
> @@ -587,17 +632,17 @@ __RTE_GEN_BIT_FLIP(, flip,, 64)
>  		return prev & mask; \
>  	}
> 
> -#define __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(size) \
> +#define __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(v, qualifier, size) \
>  	__rte_experimental \
>  	static inline bool \
> -	__rte_bit_atomic_test_and_clear ## size(uint ## size ## _t *addr, \
> -			unsigned int nr, \
> -			int memory_order) \
> +	__rte_bit_atomic_ ## v ## test_and_clear ## size(qualifier uint ## size ## _t *addr, \
> +			unsigned int nr, \
> +			int memory_order) \
>  	{ \
>  		RTE_ASSERT(nr < size); \
>  		\
> -		RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> -			(RTE_ATOMIC(uint ## size ## _t) *)addr; \
> +		qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
> +			(qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>  		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>  		uint ## size ## _t prev; \
>  		\
> @@ -607,34 +652,36 @@ __RTE_GEN_BIT_FLIP(, flip,, 64)
>  		return prev & mask; \
>  	}
> 
> -#define __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(size) \
> +#define __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(v, qualifier, size) \
>  	__rte_experimental \
>  	static inline bool \
> -	__rte_bit_atomic_test_and_assign ## size(uint ## size ## _t *addr, \
> -			unsigned int nr, \
> -			bool value, \
> -			int memory_order) \
> +	__rte_bit_atomic_ ## v ## test_and_assign ## size(qualifier uint ## size ## _t *addr, \
> +			unsigned int nr, \
> +			bool value, \
> +			int memory_order) \
>  	{ \
>  		if (value) \
> -			return __rte_bit_atomic_test_and_set ## size(addr, nr, \
> -					memory_order); \
> +			return __rte_bit_atomic_ ## v ## test_and_set ## size(addr, nr, memory_order); \
>  		else \
> -			return __rte_bit_atomic_test_and_clear ## size(addr, nr, \
> -					memory_order); \
> +			return __rte_bit_atomic_ ## v ## test_and_clear ## size(addr, nr, memory_order); \
>  	}
> 
> -#define __RTE_GEN_BIT_ATOMIC_OPS(size) \
> -	__RTE_GEN_BIT_ATOMIC_TEST(size) \
> -	__RTE_GEN_BIT_ATOMIC_SET(size) \
> -	__RTE_GEN_BIT_ATOMIC_CLEAR(size) \
> -	__RTE_GEN_BIT_ATOMIC_ASSIGN(size) \
> -	__RTE_GEN_BIT_ATOMIC_TEST_AND_SET(size) \
> -	__RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(size) \
> -	__RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(size) \
> -	__RTE_GEN_BIT_ATOMIC_FLIP(size)
> +#define __RTE_GEN_BIT_ATOMIC_OPS(v, qualifier, size) \
> +	__RTE_GEN_BIT_ATOMIC_TEST(v, qualifier, size) \
> +	__RTE_GEN_BIT_ATOMIC_SET(v, qualifier, size) \
> +	__RTE_GEN_BIT_ATOMIC_CLEAR(v, qualifier, size) \
> +	__RTE_GEN_BIT_ATOMIC_ASSIGN(v, qualifier, size) \
> +	__RTE_GEN_BIT_ATOMIC_TEST_AND_SET(v, qualifier, size) \
> +	__RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(v, qualifier, size) \
> +	__RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(v, qualifier, size) \
> +	__RTE_GEN_BIT_ATOMIC_FLIP(v, qualifier, size)
> 
> -__RTE_GEN_BIT_ATOMIC_OPS(32)
> -__RTE_GEN_BIT_ATOMIC_OPS(64)
> +#define __RTE_GEN_BIT_ATOMIC_OPS_SIZE(size) \
> +	__RTE_GEN_BIT_ATOMIC_OPS(,, size) \
> +	__RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, size)
> +
> +__RTE_GEN_BIT_ATOMIC_OPS_SIZE(32)
> +__RTE_GEN_BIT_ATOMIC_OPS_SIZE(64)

The first argument for these generator macros should probably be called
"family" (rather than "v"/"v_"), for consistency with the non-atomic ops.

> 
>  /*------------------------ 32-bit relaxed operations ------------------------*/
> 
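
To make that concrete: the rename is purely mechanical, touching only the
generator parameter lists and the token-pasted names. A sketch using the
TEST generator (just v -> family; everything else as in your patch):

#define __RTE_GEN_BIT_ATOMIC_TEST(family, qualifier, size) \
	__rte_experimental \
	static inline bool \
	__rte_bit_atomic_ ## family ## test ## size(const qualifier uint ## size ## _t *addr, \
			unsigned int nr, int memory_order) \
	{ \
		RTE_ASSERT(nr < size); \
		\
		const qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
			(const qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
		uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
		return rte_atomic_load_explicit(a_addr, memory_order) & mask; \
	}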
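
For anyone reading along without the full header: each
__RTE_GEN_BIT_ATOMIC_OPS_SIZE(size) invocation now stamps out two families
of every op, a plain one and a volatile-qualified one. Hand-expanded, the
two TEST instances for size 32 come out roughly as follows (whitespace
mine, not the literal preprocessor output):

/* Plain family: from __RTE_GEN_BIT_ATOMIC_OPS(,, 32) */
__rte_experimental
static inline bool
__rte_bit_atomic_test32(const uint32_t *addr, unsigned int nr,
		int memory_order)
{
	RTE_ASSERT(nr < 32);

	const RTE_ATOMIC(uint32_t) *a_addr =
		(const RTE_ATOMIC(uint32_t) *)addr;
	uint32_t mask = (uint32_t)1 << nr;
	return rte_atomic_load_explicit(a_addr, memory_order) & mask;
}

/* Volatile family: from __RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, 32) */
__rte_experimental
static inline bool
__rte_bit_atomic_v_test32(const volatile uint32_t *addr, unsigned int nr,
		int memory_order)
{
	RTE_ASSERT(nr < 32);

	const volatile RTE_ATOMIC(uint32_t) *a_addr =
		(const volatile RTE_ATOMIC(uint32_t) *)addr;
	uint32_t mask = (uint32_t)1 << nr;
	return rte_atomic_load_explicit(a_addr, memory_order) & mask;
}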
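
And the payoff at a call site is that code holding a volatile-qualified
word can use the atomic bit ops without casting the qualifier away. A toy
sketch calling a generated helper directly (shared_flags and claim_slot()
are made up for illustration; only the bitops helper and the
rte_memory_order constant come from the library):

#include <stdbool.h>
#include <stdint.h>

#include <rte_bitops.h>

/* Hypothetical flags word shared with another agent, hence volatile. */
static volatile uint32_t shared_flags;

static bool
claim_slot(unsigned int nr)
{
	/* Atomically set bit nr; the helper returns the previous bit value,
	 * so negating it reports whether we were the one to flip 0 -> 1.
	 * No cast away from volatile is needed with the v_ family.
	 */
	return !__rte_bit_atomic_v_test_and_set32(&shared_flags, nr,
			rte_memory_order_acquire);
}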