From: Tyler Retzlaff
To: dev@dpdk.org
Cc: Mattias Rönnblom, Morten Brørup, Abdullah Sevincer, Ajit Khaparde,
	Alok Prasad, Anatoly Burakov, Andrew Rybchenko, Anoob Joseph,
	Bruce Richardson, Byron Marohn, Chenbo Xia, Chengwen Feng,
	Ciara Loftus, Ciara Power, Dariusz Sosnowski, David Hunt,
	Devendra Singh Rawat, Erik Gabriel Carrillo, Guoyang Zhou,
	Harman Kalra, Harry van Haaren, Honnappa Nagarahalli,
	Jakub Grajciar, Jerin Jacob, Jeroen de Borst, Jian Wang, Jiawen Wu,
	Jie Hai, Jingjing Wu, Joshua Washington, Joyce Kong, Junfeng Guo,
	Kevin Laatz, Konstantin Ananyev, Liang Ma, Long Li, Maciej Czekaj,
	Matan Azrad, Maxime Coquelin, Nicolas Chautru, Ori Kam,
	Pavan Nikhilesh, Peter Mccarthy, Rahul Lakkireddy, Reshma Pattan,
	Rosen Xu, Ruifeng Wang, Rushil Gupta, Sameh Gobriel,
	Sivaprasad Tummala, Somnath Kotur, Stephen Hemminger, Suanming Mou,
	Sunil Kumar Kori, Sunil Uttarwar, Tetsuya Mukawa, Vamsi Attunuru,
	Viacheslav Ovsiienko, Vladimir Medvedkin, Xiaoyun Wang, Yipeng Wang,
	Yisen Zhuang, Yuying Zhang, Yuying Zhang, Ziyang Xuan, Tyler Retzlaff
Subject: [PATCH 26/46] event/dsw: use rte stdatomic API
Date: Wed, 20 Mar 2024 13:51:12 -0700
Message-Id: <1710967892-7046-27-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>
References: <1710967892-7046-1-git-send-email-roretzla@linux.microsoft.com>

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff
---
A short, standalone sketch of the conversion pattern follows the patch.

 drivers/event/dsw/dsw_evdev.h  |  6 +++---
 drivers/event/dsw/dsw_event.c  | 34 +++++++++++++++++-----------------
 drivers/event/dsw/dsw_xstats.c |  4 ++--
 3 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h
index d745c89..20431d2 100644
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -227,9 +227,9 @@ struct dsw_port {
 	struct rte_ring *ctl_in_ring __rte_cache_aligned;
 
 	/* Estimate of current port load. */
-	int16_t load __rte_cache_aligned;
+	RTE_ATOMIC(int16_t) load __rte_cache_aligned;
 	/* Estimate of flows currently migrating to this port. */
-	int32_t immigration_load __rte_cache_aligned;
+	RTE_ATOMIC(int32_t) immigration_load __rte_cache_aligned;
 } __rte_cache_aligned;
 
 struct dsw_queue {
@@ -252,7 +252,7 @@ struct dsw_evdev {
 	uint8_t num_queues;
 	int32_t max_inflight;
 
-	int32_t credits_on_loan __rte_cache_aligned;
+	RTE_ATOMIC(int32_t) credits_on_loan __rte_cache_aligned;
 };
 
 #define DSW_CTL_PAUS_REQ (0)
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 23488d9..6c17b44 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -33,7 +33,7 @@
 	}
 
 	total_on_loan =
-		__atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+		rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
 	available = dsw->max_inflight - total_on_loan;
 	acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
 
@@ -45,13 +45,13 @@
 	 * allocation.
 	 */
 	new_total_on_loan =
-		__atomic_fetch_add(&dsw->credits_on_loan, acquired_credits,
-				   __ATOMIC_RELAXED) + acquired_credits;
+		rte_atomic_fetch_add_explicit(&dsw->credits_on_loan, acquired_credits,
+				   rte_memory_order_relaxed) + acquired_credits;
 
 	if (unlikely(new_total_on_loan > dsw->max_inflight)) {
 		/* Some other port took the last credits */
-		__atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
-				   __ATOMIC_RELAXED);
+		rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, acquired_credits,
+				   rte_memory_order_relaxed);
 		return false;
 	}
 
@@ -77,8 +77,8 @@
 
 	port->inflight_credits = leave_credits;
 
-	__atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
-			   __ATOMIC_RELAXED);
+	rte_atomic_fetch_sub_explicit(&dsw->credits_on_loan, return_credits,
+			   rte_memory_order_relaxed);
 
 	DSW_LOG_DP_PORT(DEBUG, port->id,
 			"Returned %d tokens to pool.\n",
@@ -156,19 +156,19 @@
 	int16_t period_load;
 	int16_t new_load;
 
-	old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
+	old_load = rte_atomic_load_explicit(&port->load, rte_memory_order_relaxed);
 
 	period_load = dsw_port_load_close_period(port, now);
 
 	new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
 		(DSW_OLD_LOAD_WEIGHT+1);
 
-	__atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&port->load, new_load, rte_memory_order_relaxed);
 
 	/* The load of the recently immigrated flows should hopefully
 	 * be reflected the load estimate by now.
 	 */
-	__atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
+	rte_atomic_store_explicit(&port->immigration_load, 0, rte_memory_order_relaxed);
 }
 
 static void
@@ -390,10 +390,10 @@ struct dsw_queue_flow_burst {
 
 	for (i = 0; i < dsw->num_ports; i++) {
 		int16_t measured_load =
-			__atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
+			rte_atomic_load_explicit(&dsw->ports[i].load, rte_memory_order_relaxed);
 		int32_t immigration_load =
-			__atomic_load_n(&dsw->ports[i].immigration_load,
-					__ATOMIC_RELAXED);
+			rte_atomic_load_explicit(&dsw->ports[i].immigration_load,
+					rte_memory_order_relaxed);
 		int32_t load = measured_load + immigration_load;
 
 		load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -523,8 +523,8 @@ struct dsw_queue_flow_burst {
 	target_qfs[*targets_len] = *candidate_qf;
 	(*targets_len)++;
 
-	__atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
-			   candidate_flow_load, __ATOMIC_RELAXED);
+	rte_atomic_fetch_add_explicit(&dsw->ports[candidate_port_id].immigration_load,
+			   candidate_flow_load, rte_memory_order_relaxed);
 
 	return true;
 }
@@ -882,7 +882,7 @@ struct dsw_queue_flow_burst {
 	}
 
 	source_port_load =
-		__atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
+		rte_atomic_load_explicit(&source_port->load, rte_memory_order_relaxed);
 	if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
 		DSW_LOG_DP_PORT(DEBUG, source_port->id,
 				"Load %d is below threshold level %d.\n",
@@ -1301,7 +1301,7 @@ struct dsw_queue_flow_burst {
 	 * above the water mark.
 	 */
 	if (unlikely(num_new > 0 &&
-		     __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
+		     rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed) >
 		     source_port->new_event_threshold))
 		return 0;
 
diff --git a/drivers/event/dsw/dsw_xstats.c b/drivers/event/dsw/dsw_xstats.c
index 2a83a28..f61dfd8 100644
--- a/drivers/event/dsw/dsw_xstats.c
+++ b/drivers/event/dsw/dsw_xstats.c
@@ -48,7 +48,7 @@ struct dsw_xstats_port {
 static uint64_t
 dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
 {
-	return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
+	return rte_atomic_load_explicit(&dsw->credits_on_loan, rte_memory_order_relaxed);
 }
 
 static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ struct dsw_xstats_port {
 {
 	int16_t load;
 
-	load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
+	load = rte_atomic_load_explicit(&dsw->ports[port_id].load, rte_memory_order_relaxed);
 
 	return DSW_LOAD_TO_PERCENT(load);
 }
-- 
1.8.3.1
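
For readers new to the rte stdatomic wrappers, here is a minimal,
self-contained sketch of the conversion pattern the patch applies,
assuming the macros and functions declared in <rte_stdatomic.h>. The
counter and helper names below are illustrative only and do not appear
in the driver.

#include <stdint.h>

#include <rte_stdatomic.h>

/* Before the conversion this would be a plain int32_t updated with the
 * gcc __atomic_xxx builtins. RTE_ATOMIC() marks the object so that it
 * becomes a standard C11 atomic type when the build enables standard
 * atomics, and stays a plain integer otherwise.
 */
static RTE_ATOMIC(int32_t) example_counter;

static inline int32_t
example_counter_add(int32_t amount)
{
	/* was: __atomic_fetch_add(&example_counter, amount, __ATOMIC_RELAXED) */
	return rte_atomic_fetch_add_explicit(&example_counter, amount,
					     rte_memory_order_relaxed);
}

static inline int32_t
example_counter_read(void)
{
	/* was: __atomic_load_n(&example_counter, __ATOMIC_RELAXED) */
	return rte_atomic_load_explicit(&example_counter,
					rte_memory_order_relaxed);
}

The mapping is mechanical: each __atomic_xxx builtin becomes the
corresponding rte_atomic_xxx_explicit call, each __ATOMIC_xxx constant
becomes rte_memory_order_xxx, and every variable accessed through those
calls is declared with RTE_ATOMIC(), exactly as done for load,
immigration_load and credits_on_loan in the patch above.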