From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: techboard@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>,
 Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
 Ruifeng Wang <ruifeng.wang@arm.com>, Jerin Jacob <jerinj@marvell.com>,
 Sunil Kumar Kori <skori@marvell.com>,
 Mattias Rönnblom <mattias.ronnblom@ericsson.com>,
 Joyce Kong <joyce.kong@arm.com>,
 David Christensen <drc@linux.vnet.ibm.com>,
 Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
 David Hunt <david.hunt@intel.com>, Thomas Monjalon <thomas@monjalon.net>,
 David Marchand <david.marchand@redhat.com>,
 Tyler Retzlaff <roretzla@linux.microsoft.com>
Subject: [PATCH v5 4/6] distributor: adapt for EAL optional atomics API changes
Date: Thu, 17 Aug 2023 14:42:15 -0700
Message-Id: <1692308537-2646-5-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1692308537-2646-1-git-send-email-roretzla@linux.microsoft.com>
References: <1691717521-1025-1-git-send-email-roretzla@linux.microsoft.com>
 <1692308537-2646-1-git-send-email-roretzla@linux.microsoft.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adapt the distributor library for the EAL optional atomics API changes:
mark the bufptr64 field with RTE_ATOMIC(), replace the GCC builtin
__atomic_load_n()/__atomic_store_n() calls with
rte_atomic_load_explicit()/rte_atomic_store_explicit(), and use the
rte_memory_order_* constants in place of __ATOMIC_* throughout,
including in the RTE_WAIT_UNTIL_MASKED() invocations.
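
The conversion is mechanical. As a before/after sketch of the pattern
applied in every hunk below (illustrative only, not part of the diff):

    /* before: GCC builtin atomics with __ATOMIC_* memory orders */
    volatile int64_t bufptr64;
    __atomic_store_n(&buf->bufptr64, req, __ATOMIC_RELEASE);
    data = __atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE);

    /* after: EAL atomics API with rte_memory_order_* orders */
    volatile RTE_ATOMIC(int64_t) bufptr64;
    rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release);
    data = rte_atomic_load_explicit(&buf->bufptr64, rte_memory_order_acquire);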

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Reviewed-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/distributor/distributor_private.h    |  2 +-
 lib/distributor/rte_distributor_single.c | 44 ++++++++++++++++----------------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/lib/distributor/distributor_private.h b/lib/distributor/distributor_private.h
index 7101f63..2f29343 100644
--- a/lib/distributor/distributor_private.h
+++ b/lib/distributor/distributor_private.h
@@ -52,7 +52,7 @@
  * Only 64-bits of the memory is actually used though.
  */
 union rte_distributor_buffer_single {
-	volatile int64_t bufptr64;
+	volatile RTE_ATOMIC(int64_t) bufptr64;
 	char pad[RTE_CACHE_LINE_SIZE*3];
 } __rte_cache_aligned;
 
diff --git a/lib/distributor/rte_distributor_single.c b/lib/distributor/rte_distributor_single.c
index 2c77ac4..ad43c13 100644
--- a/lib/distributor/rte_distributor_single.c
+++ b/lib/distributor/rte_distributor_single.c
@@ -32,10 +32,10 @@
 	int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
 			| RTE_DISTRIB_GET_BUF;
 	RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK,
-		==, 0, __ATOMIC_RELAXED);
+		==, 0, rte_memory_order_relaxed);
 
 	/* Sync with distributor on GET_BUF flag. */
-	__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release);
 }
 
 struct rte_mbuf *
@@ -44,7 +44,7 @@ struct rte_mbuf *
 {
 	union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
 	/* Sync with distributor. Acquire bufptr64. */
-	if (__atomic_load_n(&buf->bufptr64, __ATOMIC_ACQUIRE)
+	if (rte_atomic_load_explicit(&buf->bufptr64, rte_memory_order_acquire)
 		& RTE_DISTRIB_GET_BUF)
 		return NULL;
 
@@ -72,10 +72,10 @@ struct rte_mbuf *
 	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
 			| RTE_DISTRIB_RETURN_BUF;
 	RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK,
-		==, 0, __ATOMIC_RELAXED);
+		==, 0, rte_memory_order_relaxed);
 
 	/* Sync with distributor on RETURN_BUF flag. */
-	__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&buf->bufptr64, req, rte_memory_order_release);
 	return 0;
 }
 
@@ -119,7 +119,7 @@ struct rte_mbuf *
 	d->in_flight_tags[wkr] = 0;
 	d->in_flight_bitmask &= ~(1UL << wkr);
 	/* Sync with worker. Release bufptr64. */
-	__atomic_store_n(&(d->bufs[wkr].bufptr64), 0, __ATOMIC_RELEASE);
+	rte_atomic_store_explicit(&d->bufs[wkr].bufptr64, 0, rte_memory_order_release);
 	if (unlikely(d->backlog[wkr].count != 0)) {
 		/* On return of a packet, we need to move the
 		 * queued packets for this core elsewhere.
@@ -165,21 +165,21 @@ struct rte_mbuf *
 	for (wkr = 0; wkr < d->num_workers; wkr++) {
 		uintptr_t oldbuf = 0;
 		/* Sync with worker. Acquire bufptr64. */
-		const int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
-							__ATOMIC_ACQUIRE);
+		const int64_t data = rte_atomic_load_explicit(&d->bufs[wkr].bufptr64,
+							rte_memory_order_acquire);
 
 		if (data & RTE_DISTRIB_GET_BUF) {
 			flushed++;
 			if (d->backlog[wkr].count)
 				/* Sync with worker. Release bufptr64. */
-				__atomic_store_n(&(d->bufs[wkr].bufptr64),
+				rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 					backlog_pop(&d->backlog[wkr]),
-					__ATOMIC_RELEASE);
+					rte_memory_order_release);
 			else {
 				/* Sync with worker on GET_BUF flag. */
-				__atomic_store_n(&(d->bufs[wkr].bufptr64),
+				rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 					RTE_DISTRIB_GET_BUF,
-					__ATOMIC_RELEASE);
+					rte_memory_order_release);
 				d->in_flight_tags[wkr] = 0;
 				d->in_flight_bitmask &= ~(1UL << wkr);
 			}
@@ -217,8 +217,8 @@ struct rte_mbuf *
 	while (next_idx < num_mbufs || next_mb != NULL) {
 		uintptr_t oldbuf = 0;
 		/* Sync with worker. Acquire bufptr64. */
-		int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),
-						__ATOMIC_ACQUIRE);
+		int64_t data = rte_atomic_load_explicit(&d->bufs[wkr].bufptr64,
+						rte_memory_order_acquire);
 
 		if (!next_mb) {
 			next_mb = mbufs[next_idx++];
@@ -264,15 +264,15 @@ struct rte_mbuf *
 
 			if (d->backlog[wkr].count)
 				/* Sync with worker. Release bufptr64. */
-				__atomic_store_n(&(d->bufs[wkr].bufptr64),
+				rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 						backlog_pop(&d->backlog[wkr]),
-						__ATOMIC_RELEASE);
+						rte_memory_order_release);
 
 			else {
 				/* Sync with worker. Release bufptr64.  */
-				__atomic_store_n(&(d->bufs[wkr].bufptr64),
+				rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 						next_value,
-						__ATOMIC_RELEASE);
+						rte_memory_order_release);
 				d->in_flight_tags[wkr] = new_tag;
 				d->in_flight_bitmask |= (1UL << wkr);
 				next_mb = NULL;
@@ -294,8 +294,8 @@ struct rte_mbuf *
 	for (wkr = 0; wkr < d->num_workers; wkr++)
 		if (d->backlog[wkr].count &&
 				/* Sync with worker. Acquire bufptr64. */
-				(__atomic_load_n(&(d->bufs[wkr].bufptr64),
-				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
+				(rte_atomic_load_explicit(&d->bufs[wkr].bufptr64,
+				rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF)) {
 
 			int64_t oldbuf = d->bufs[wkr].bufptr64 >>
 					RTE_DISTRIB_FLAG_BITS;
@@ -303,9 +303,9 @@ struct rte_mbuf *
 			store_return(oldbuf, d, &ret_start, &ret_count);
 
 			/* Sync with worker. Release bufptr64. */
-			__atomic_store_n(&(d->bufs[wkr].bufptr64),
+			rte_atomic_store_explicit(&d->bufs[wkr].bufptr64,
 				backlog_pop(&d->backlog[wkr]),
-				__ATOMIC_RELEASE);
+				rte_memory_order_release);
 		}
 
 	d->returns.start = ret_start;
-- 
1.8.3.1