From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9BD3243200; Thu, 26 Oct 2023 02:33:03 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4D11D42DEB; Thu, 26 Oct 2023 02:32:13 +0200 (CEST) Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182]) by mails.dpdk.org (Postfix) with ESMTP id 5B4D64027F for ; Thu, 26 Oct 2023 02:32:01 +0200 (CEST) Received: by linux.microsoft.com (Postfix, from userid 1086) id 3187920B74CF; Wed, 25 Oct 2023 17:31:59 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com 3187920B74CF DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com; s=default; t=1698280320; bh=z0Rs5NSot0ilkYEI+ncHHjpoGPyvF3LWV91KEgWDvF4=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=BN2cnAcythUQ8SB1mKiM61XHjE4bsQdMCtU1/jov2cZFwZVV3K40XZWJiTVXPkTkj tdtV+b6yBCkyPHT4+y6uA/dKFlMir8qDuBVzfzJ+oXVkcprsH8xWxAXZMH5XPU+o8D Tobo5Wi2QgC8O/6HQ6u/kPgqmTWxEPpXtnozHHgc= From: Tyler Retzlaff To: dev@dpdk.org Cc: Akhil Goyal , Anatoly Burakov , Andrew Rybchenko , Bruce Richardson , Chenbo Xia , Ciara Power , David Christensen , David Hunt , Dmitry Kozlyuk , Dmitry Malloy , Elena Agostini , Erik Gabriel Carrillo , Fan Zhang , Ferruh Yigit , Harman Kalra , Harry van Haaren , Honnappa Nagarahalli , Jerin Jacob , Konstantin Ananyev , Matan Azrad , Maxime Coquelin , Narcisa Ana Maria Vasile , Nicolas Chautru , Olivier Matz , Ori Kam , Pallavi Kadam , Pavan Nikhilesh , Reshma Pattan , Sameh Gobriel , Shijith Thotton , Sivaprasad Tummala , Stephen Hemminger , Suanming Mou , Sunil Kumar Kori , Thomas Monjalon , Viacheslav Ovsiienko , Vladimir Medvedkin , Yipeng Wang , Tyler Retzlaff Subject: [PATCH v3 15/19] distributor: use rte optional stdatomic API Date: Wed, 25 Oct 2023 17:31:50 -0700 Message-Id: <1698280314-25861-16-git-send-email-roretzla@linux.microsoft.com> 
X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1698280314-25861-1-git-send-email-roretzla@linux.microsoft.com> References: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com> <1698280314-25861-1-git-send-email-roretzla@linux.microsoft.com> X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Replace the use of gcc builtin __atomic_xxx intrinsics with corresponding rte_atomic_xxx optional stdatomic API Signed-off-by: Tyler Retzlaff --- lib/distributor/distributor_private.h | 4 +-- lib/distributor/rte_distributor.c | 54 +++++++++++++++++------------------ 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/lib/distributor/distributor_private.h b/lib/distributor/distributor_private.h index 2f29343..dfeb9b5 100644 --- a/lib/distributor/distributor_private.h +++ b/lib/distributor/distributor_private.h @@ -113,12 +113,12 @@ enum rte_distributor_match_function { * There is a separate cacheline for returns in the burst API. 
*/ struct rte_distributor_buffer { - volatile int64_t bufptr64[RTE_DIST_BURST_SIZE] + volatile RTE_ATOMIC(int64_t) bufptr64[RTE_DIST_BURST_SIZE] __rte_cache_aligned; /* <= outgoing to worker */ int64_t pad1 __rte_cache_aligned; /* <= one cache line */ - volatile int64_t retptr64[RTE_DIST_BURST_SIZE] + volatile RTE_ATOMIC(int64_t) retptr64[RTE_DIST_BURST_SIZE] __rte_cache_aligned; /* <= incoming from worker */ int64_t pad2 __rte_cache_aligned; /* <= one cache line */ diff --git a/lib/distributor/rte_distributor.c b/lib/distributor/rte_distributor.c index 5ca80dd..2ecb95c 100644 --- a/lib/distributor/rte_distributor.c +++ b/lib/distributor/rte_distributor.c @@ -38,7 +38,7 @@ struct rte_distributor_buffer *buf = &(d->bufs[worker_id]); unsigned int i; - volatile int64_t *retptr64; + volatile RTE_ATOMIC(int64_t) *retptr64; if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) { rte_distributor_request_pkt_single(d->d_single, @@ -50,7 +50,7 @@ /* Spin while handshake bits are set (scheduler clears it). * Sync with worker on GET_BUF flag. */ - while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE) + while (unlikely(rte_atomic_load_explicit(retptr64, rte_memory_order_acquire) & (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) { rte_pause(); uint64_t t = rte_rdtsc()+100; @@ -78,8 +78,8 @@ * line is ready for processing * Sync with distributor to release retptrs */ - __atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF, - __ATOMIC_RELEASE); + rte_atomic_store_explicit(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF, + rte_memory_order_release); } int @@ -102,7 +102,7 @@ * RETURN_BUF is set when distributor must retrieve in-flight packets * Sync with distributor to acquire bufptrs */ - if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE) + if (rte_atomic_load_explicit(&(buf->bufptr64[0]), rte_memory_order_acquire) & (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) return -1; @@ -120,8 +120,8 @@ * on the next cacheline while we're working. 
* Sync with distributor on GET_BUF flag. Release bufptrs. */ - __atomic_store_n(&(buf->bufptr64[0]), - buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&(buf->bufptr64[0]), + buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, rte_memory_order_release); return count; } @@ -177,7 +177,7 @@ /* Spin while handshake bits are set (scheduler clears it). * Sync with worker on GET_BUF flag. */ - while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED) + while (unlikely(rte_atomic_load_explicit(&(buf->retptr64[0]), rte_memory_order_relaxed) & (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) { rte_pause(); uint64_t t = rte_rdtsc()+100; @@ -187,7 +187,7 @@ } /* Sync with distributor to acquire retptrs */ - __atomic_thread_fence(__ATOMIC_ACQUIRE); + rte_atomic_thread_fence(rte_memory_order_acquire); for (i = 0; i < RTE_DIST_BURST_SIZE; i++) /* Switch off the return bit first */ buf->retptr64[i] = 0; @@ -200,15 +200,15 @@ * we won't read any mbufs from there even if GET_BUF is set. * This allows distributor to retrieve in-flight already sent packets. */ - __atomic_fetch_or(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF, - __ATOMIC_ACQ_REL); + rte_atomic_fetch_or_explicit(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF, + rte_memory_order_acq_rel); /* set the RETURN_BUF on retptr64 even if we got no returns. * Sync with distributor on RETURN_BUF flag. Release retptrs. * Notify distributor that we don't request more packets any more. */ - __atomic_store_n(&(buf->retptr64[0]), - buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&(buf->retptr64[0]), + buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, rte_memory_order_release); return 0; } @@ -297,7 +297,7 @@ * to worker which does not require new packets. * They must be retrieved and assigned to another worker.
*/ - if (!(__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE) + if (!(rte_atomic_load_explicit(&(buf->bufptr64[0]), rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF)) for (i = 0; i < RTE_DIST_BURST_SIZE; i++) if (buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF) @@ -310,8 +310,8 @@ * with new packets if worker will make a new request. * - clear RETURN_BUF to unlock reads on worker side. */ - __atomic_store_n(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF, - __ATOMIC_RELEASE); + rte_atomic_store_explicit(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF, + rte_memory_order_release); /* Collect backlog packets from worker */ for (i = 0; i < d->backlog[wkr].count; i++) @@ -348,7 +348,7 @@ unsigned int i; /* Sync on GET_BUF flag. Acquire retptrs. */ - if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE) + if (rte_atomic_load_explicit(&(buf->retptr64[0]), rte_memory_order_acquire) & (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) { for (i = 0; i < RTE_DIST_BURST_SIZE; i++) { if (buf->retptr64[i] & RTE_DISTRIB_VALID_BUF) { @@ -379,7 +379,7 @@ /* Clear for the worker to populate with more returns. * Sync with distributor on GET_BUF flag. Release retptrs. */ - __atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&(buf->retptr64[0]), 0, rte_memory_order_release); } return count; } @@ -404,7 +404,7 @@ return 0; /* Sync with worker on GET_BUF flag */ - while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE) + while (!(rte_atomic_load_explicit(&(d->bufs[wkr].bufptr64[0]), rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF)) { handle_returns(d, wkr); if (unlikely(!d->active[wkr])) @@ -430,8 +430,8 @@ /* Clear the GET bit. * Sync with worker on GET_BUF flag. Release bufptrs. 
*/ - __atomic_store_n(&(buf->bufptr64[0]), - buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE); + rte_atomic_store_explicit(&(buf->bufptr64[0]), + buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, rte_memory_order_release); return buf->count; } @@ -463,8 +463,8 @@ /* Flush out all non-full cache-lines to workers. */ for (wid = 0 ; wid < d->num_workers; wid++) { /* Sync with worker on GET_BUF flag. */ - if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]), - __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) { + if (rte_atomic_load_explicit(&(d->bufs[wid].bufptr64[0]), + rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF) { d->bufs[wid].count = 0; release(d, wid); handle_returns(d, wid); @@ -598,8 +598,8 @@ /* Flush out all non-full cache-lines to workers. */ for (wid = 0 ; wid < d->num_workers; wid++) /* Sync with worker on GET_BUF flag. */ - if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]), - __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) { + if ((rte_atomic_load_explicit(&(d->bufs[wid].bufptr64[0]), + rte_memory_order_acquire) & RTE_DISTRIB_GET_BUF)) { d->bufs[wid].count = 0; release(d, wid); } @@ -700,8 +700,8 @@ /* throw away returns, so workers can exit */ for (wkr = 0; wkr < d->num_workers; wkr++) /* Sync with worker. Release retptrs. */ - __atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0, - __ATOMIC_RELEASE); + rte_atomic_store_explicit(&(d->bufs[wkr].retptr64[0]), 0, + rte_memory_order_release); d->returns.start = d->returns.count = 0; } -- 1.8.3.1