From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id D5D1243190;
	Tue, 17 Oct 2023 22:32:42 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 8014D42E49;
	Tue, 17 Oct 2023 22:31:37 +0200 (CEST)
Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182])
 by mails.dpdk.org (Postfix) with ESMTP id 759D3402DE
 for <dev@dpdk.org>; Tue, 17 Oct 2023 22:31:20 +0200 (CEST)
Received: by linux.microsoft.com (Postfix, from userid 1086)
 id 1773020B74CB; Tue, 17 Oct 2023 13:31:18 -0700 (PDT)
DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com 1773020B74CB
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;
 s=default; t=1697574679;
 bh=pEFbJZzyaIqLKkUDwBHn2JsUK4YWeGp0ZMPJCXKNSt0=;
 h=From:To:Cc:Subject:Date:In-Reply-To:References:From;
 b=XGhFlWyVeSDpajpII8CoL8G5Gdc7b3eFFYK1Vd4OsKMGNot6zLskN+WCl4VgqGweR
 QRVVP86G/chDEZngiedyfcMtjagk6vlwY/pCBpOADBYdDjDqaBM6vWKgbo1SSusdRh
 xmN4U/qTNWtjNd9Du1cpQSad/bGjzuFpjmz0gaRM=
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Akhil Goyal <gakhil@marvell.com>,
 Anatoly Burakov <anatoly.burakov@intel.com>,
 Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
 Bruce Richardson <bruce.richardson@intel.com>,
 Chenbo Xia <chenbo.xia@intel.com>, Ciara Power <ciara.power@intel.com>,
 David Christensen <drc@linux.vnet.ibm.com>,
 David Hunt <david.hunt@intel.com>,
 Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,
 Dmitry Malloy <dmitrym@microsoft.com>,
 Elena Agostini <eagostini@nvidia.com>,
 Erik Gabriel Carrillo <erik.g.carrillo@intel.com>,
 Fan Zhang <fanzhang.oss@gmail.com>, Ferruh Yigit <ferruh.yigit@amd.com>,
 Harman Kalra <hkalra@marvell.com>,
 Harry van Haaren <harry.van.haaren@intel.com>,
 Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
 Jerin Jacob <jerinj@marvell.com>,
 Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
 Matan Azrad <matan@nvidia.com>,
 Maxime Coquelin <maxime.coquelin@redhat.com>,
 Narcisa Ana Maria Vasile <navasile@linux.microsoft.com>,
 Nicolas Chautru <nicolas.chautru@intel.com>,
 Olivier Matz <olivier.matz@6wind.com>, Ori Kam <orika@nvidia.com>,
 Pallavi Kadam <pallavi.kadam@intel.com>,
 Pavan Nikhilesh <pbhagavatula@marvell.com>,
 Reshma Pattan <reshma.pattan@intel.com>,
 Sameh Gobriel <sameh.gobriel@intel.com>,
 Shijith Thotton <sthotton@marvell.com>,
 Sivaprasad Tummala <sivaprasad.tummala@amd.com>,
 Stephen Hemminger <stephen@networkplumber.org>,
 Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,
 Thomas Monjalon <thomas@monjalon.net>,
 Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
 Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
 Yipeng Wang <yipeng1.wang@intel.com>,
 Tyler Retzlaff <roretzla@linux.microsoft.com>
Subject: [PATCH v2 11/19] stack: use rte optional stdatomic API
Date: Tue, 17 Oct 2023 13:31:09 -0700
Message-Id: <1697574677-16578-12-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1697574677-16578-1-git-send-email-roretzla@linux.microsoft.com>
References: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>
 <1697574677-16578-1-git-send-email-roretzla@linux.microsoft.com>
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org

Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.
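
For illustration, the conversion pattern applied throughout is the
following (sketch only, not part of the diff):

    /* before: gcc builtin taking a __ATOMIC_xxx memory order */
    uint64_t len = __atomic_load_n(&list->len, __ATOMIC_RELAXED);
    __atomic_fetch_add(&list->len, num, __ATOMIC_RELEASE);

    /* after: optional stdatomic API taking rte_memory_order_xxx;
     * the operand is now declared RTE_ATOMIC(uint64_t) so that it
     * is a true C11 atomic when RTE_ENABLE_STDATOMIC is defined
     */
    uint64_t len = rte_atomic_load_explicit(&list->len,
            rte_memory_order_relaxed);
    rte_atomic_fetch_add_explicit(&list->len, num,
            rte_memory_order_release);

__atomic_compare_exchange_n() converts to
rte_atomic_compare_exchange_weak_explicit() where weak == 1 (the c11
variant) and to rte_atomic_compare_exchange_strong_explicit() where
weak == 0 (the generic variant). When RTE_ENABLE_STDATOMIC is not
defined, the rte_atomic_xxx macros fall back to the gcc builtins, so
the change is behavior-preserving on existing toolchains.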

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/stack/rte_stack.h            |  2 +-
 lib/stack/rte_stack_lf_c11.h     | 24 ++++++++++++------------
 lib/stack/rte_stack_lf_generic.h | 18 +++++++++---------
 3 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/lib/stack/rte_stack.h b/lib/stack/rte_stack.h
index 921d29a..a379300 100644
--- a/lib/stack/rte_stack.h
+++ b/lib/stack/rte_stack.h
@@ -44,7 +44,7 @@ struct rte_stack_lf_list {
 	/** List head */
 	struct rte_stack_lf_head head __rte_aligned(16);
 	/** List len */
-	uint64_t len;
+	RTE_ATOMIC(uint64_t) len;
 };
 
 /* Structure containing two lock-free LIFO lists: the stack itself and a list
diff --git a/lib/stack/rte_stack_lf_c11.h b/lib/stack/rte_stack_lf_c11.h
index 687a6f6..9cb6998 100644
--- a/lib/stack/rte_stack_lf_c11.h
+++ b/lib/stack/rte_stack_lf_c11.h
@@ -26,8 +26,8 @@
 	 * elements. If the mempool is near-empty to the point that this is a
 	 * concern, the user should consider increasing the mempool size.
 	 */
-	return (unsigned int)__atomic_load_n(&s->stack_lf.used.len,
-					     __ATOMIC_RELAXED);
+	return (unsigned int)rte_atomic_load_explicit(&s->stack_lf.used.len,
+					     rte_memory_order_relaxed);
 }
 
 static __rte_always_inline void
@@ -59,14 +59,14 @@
 				(rte_int128_t *)&list->head,
 				(rte_int128_t *)&old_head,
 				(rte_int128_t *)&new_head,
-				1, __ATOMIC_RELEASE,
-				__ATOMIC_RELAXED);
+				1, rte_memory_order_release,
+				rte_memory_order_relaxed);
 	} while (success == 0);
 
 	/* Ensure the stack modifications are not reordered with respect
 	 * to the LIFO len update.
 	 */
-	__atomic_fetch_add(&list->len, num, __ATOMIC_RELEASE);
+	rte_atomic_fetch_add_explicit(&list->len, num, rte_memory_order_release);
 }
 
 static __rte_always_inline struct rte_stack_lf_elem *
@@ -80,7 +80,7 @@
 	int success;
 
 	/* Reserve num elements, if available */
-	len = __atomic_load_n(&list->len, __ATOMIC_RELAXED);
+	len = rte_atomic_load_explicit(&list->len, rte_memory_order_relaxed);
 
 	while (1) {
 		/* Does the list contain enough elements? */
@@ -88,10 +88,10 @@
 			return NULL;
 
 		/* len is updated on failure */
-		if (__atomic_compare_exchange_n(&list->len,
+		if (rte_atomic_compare_exchange_weak_explicit(&list->len,
 						&len, len - num,
-						1, __ATOMIC_ACQUIRE,
-						__ATOMIC_RELAXED))
+						rte_memory_order_acquire,
+						rte_memory_order_relaxed))
 			break;
 	}
 
@@ -110,7 +110,7 @@
 		 * elements are properly ordered with respect to the head
 		 * pointer read.
 		 */
-		__atomic_thread_fence(__ATOMIC_ACQUIRE);
+		rte_atomic_thread_fence(rte_memory_order_acquire);
 
 		rte_prefetch0(old_head.top);
 
@@ -159,8 +159,8 @@
 				(rte_int128_t *)&list->head,
 				(rte_int128_t *)&old_head,
 				(rte_int128_t *)&new_head,
-				0, __ATOMIC_RELAXED,
-				__ATOMIC_RELAXED);
+				0, rte_memory_order_relaxed,
+				rte_memory_order_relaxed);
 	} while (success == 0);
 
 	return old_head.top;
diff --git a/lib/stack/rte_stack_lf_generic.h b/lib/stack/rte_stack_lf_generic.h
index 39f7ff3..cc69e4d 100644
--- a/lib/stack/rte_stack_lf_generic.h
+++ b/lib/stack/rte_stack_lf_generic.h
@@ -27,7 +27,7 @@
 	 * concern, the user should consider increasing the mempool size.
 	 */
 	/* NOTE: review for potential ordering optimization */
-	return __atomic_load_n(&s->stack_lf.used.len, __ATOMIC_SEQ_CST);
+	return rte_atomic_load_explicit(&s->stack_lf.used.len, rte_memory_order_seq_cst);
 }
 
 static __rte_always_inline void
@@ -64,11 +64,11 @@
 				(rte_int128_t *)&list->head,
 				(rte_int128_t *)&old_head,
 				(rte_int128_t *)&new_head,
-				1, __ATOMIC_RELEASE,
-				__ATOMIC_RELAXED);
+				1, rte_memory_order_release,
+				rte_memory_order_relaxed);
 	} while (success == 0);
 	/* NOTE: review for potential ordering optimization */
-	__atomic_fetch_add(&list->len, num, __ATOMIC_SEQ_CST);
+	rte_atomic_fetch_add_explicit(&list->len, num, rte_memory_order_seq_cst);
 }
 
 static __rte_always_inline struct rte_stack_lf_elem *
@@ -83,15 +83,15 @@
 	/* Reserve num elements, if available */
 	while (1) {
 		/* NOTE: review for potential ordering optimization */
-		uint64_t len = __atomic_load_n(&list->len, __ATOMIC_SEQ_CST);
+		uint64_t len = rte_atomic_load_explicit(&list->len, rte_memory_order_seq_cst);
 
 		/* Does the list contain enough elements? */
 		if (unlikely(len < num))
 			return NULL;
 
 		/* NOTE: review for potential ordering optimization */
-		if (__atomic_compare_exchange_n(&list->len, &len, len - num,
-				0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+		if (rte_atomic_compare_exchange_strong_explicit(&list->len, &len, len - num,
+				rte_memory_order_seq_cst, rte_memory_order_seq_cst))
 			break;
 	}
 
@@ -143,8 +143,8 @@
 				(rte_int128_t *)&list->head,
 				(rte_int128_t *)&old_head,
 				(rte_int128_t *)&new_head,
-				1, __ATOMIC_RELEASE,
-				__ATOMIC_RELAXED);
+				1, rte_memory_order_release,
+				rte_memory_order_relaxed);
 	} while (success == 0);
 
 	return old_head.top;
-- 
1.8.3.1