From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Akhil Goyal, Anatoly Burakov, Andrew Rybchenko, Bruce Richardson,
	Chenbo Xia, Ciara Power, David Christensen, David Hunt,
	Dmitry Kozlyuk, Dmitry Malloy, Elena Agostini, Erik Gabriel Carrillo,
	Fan Zhang, Ferruh Yigit, Harman Kalra, Harry van Haaren,
	Honnappa Nagarahalli, Jerin Jacob, Konstantin Ananyev, Matan Azrad,
	Maxime Coquelin, Narcisa Ana Maria Vasile, Nicolas Chautru,
	Olivier Matz, Ori Kam, Pallavi Kadam, Pavan Nikhilesh, Reshma Pattan,
	Sameh Gobriel, Shijith Thotton, Sivaprasad Tummala, Stephen Hemminger,
	Suanming Mou, Sunil Kumar Kori, Thomas Monjalon, Viacheslav Ovsiienko,
	Vladimir Medvedkin, Yipeng Wang, Tyler Retzlaff
Subject: [PATCH 18/21] ethdev: use rte optional stdatomic API
Date: Mon, 16 Oct 2023 16:09:02 -0700
Message-Id: <1697497745-20664-19-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>
References: <1697497745-20664-1-git-send-email-roretzla@linux.microsoft.com>

Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/ethdev/ethdev_driver.h   | 16 ++++++++--------
 lib/ethdev/ethdev_private.c  |  6 +++---
 lib/ethdev/rte_ethdev.c      | 24 ++++++++++++------------
 lib/ethdev/rte_ethdev.h      | 16 ++++++++--------
 lib/ethdev/rte_ethdev_core.h |  2 +-
 5 files changed, 32 insertions(+), 32 deletions(-)
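Notes (below the cut, not part of the commit message): for reviewers new
to the rte_stdatomic.h wrappers, a minimal sketch of the mapping this
patch applies throughout lib/ethdev. The variable and wrapper names here
are illustrative only and do not appear in this series; RTE_ATOMIC(),
rte_atomic_*_explicit() and rte_memory_order_* are the rte_stdatomic.h
names used in the diff below.

    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* Hypothetical shared word. RTE_ATOMIC() marks the type for the
     * optional stdatomic API: it expands to _Atomic when DPDK is built
     * with enable_stdatomic (RTE_ENABLE_STDATOMIC) and to the plain
     * type otherwise, where the gcc __atomic path is kept underneath.
     */
    static RTE_ATOMIC(uint64_t) shared_word;

    static inline uint64_t
    swap_word(uint64_t new_val)
    {
        /* was: __atomic_exchange_n(&shared_word, new_val, __ATOMIC_SEQ_CST) */
        return rte_atomic_exchange_explicit(&shared_word, new_val,
            rte_memory_order_seq_cst);
    }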
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index deb23ad..b482cd1 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -30,7 +30,7 @@
  * queue on Rx and Tx.
  */
 struct rte_eth_rxtx_callback {
-	struct rte_eth_rxtx_callback *next;
+	RTE_ATOMIC(struct rte_eth_rxtx_callback *) next;
 	union{
 		rte_rx_callback_fn rx;
 		rte_tx_callback_fn tx;
@@ -80,12 +80,12 @@ struct rte_eth_dev {
 	 * User-supplied functions called from rx_burst to post-process
 	 * received packets before passing them to the user
 	 */
-	struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+	RTE_ATOMIC(struct rte_eth_rxtx_callback *) post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
 	/**
 	 * User-supplied functions called from tx_burst to pre-process
 	 * received packets before passing them to the driver for transmission
 	 */
-	struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+	RTE_ATOMIC(struct rte_eth_rxtx_callback *) pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
 	enum rte_eth_dev_state state; /**< Flag indicating the port state */
 	void *security_ctx; /**< Context for security ops */
 
@@ -1655,7 +1655,7 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
 rte_eth_linkstatus_set(struct rte_eth_dev *dev,
 		       const struct rte_eth_link *new_link)
 {
-	uint64_t *dev_link = (uint64_t *)&(dev->data->dev_link);
+	RTE_ATOMIC(uint64_t) *dev_link = (uint64_t __rte_atomic *)&(dev->data->dev_link);
 	union {
 		uint64_t val64;
 		struct rte_eth_link link;
@@ -1663,8 +1663,8 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
 
 	RTE_BUILD_BUG_ON(sizeof(*new_link) != sizeof(uint64_t));
 
-	orig.val64 = __atomic_exchange_n(dev_link, *(const uint64_t *)new_link,
-			__ATOMIC_SEQ_CST);
+	orig.val64 = rte_atomic_exchange_explicit(dev_link, *(const uint64_t *)new_link,
+			rte_memory_order_seq_cst);
 
 	return (orig.link.link_status == new_link->link_status) ? -1 : 0;
 }
@@ -1682,12 +1682,12 @@ int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
 rte_eth_linkstatus_get(const struct rte_eth_dev *dev,
 		       struct rte_eth_link *link)
 {
-	uint64_t *src = (uint64_t *)&(dev->data->dev_link);
+	RTE_ATOMIC(uint64_t) *src = (uint64_t __rte_atomic *)&(dev->data->dev_link);
 	uint64_t *dst = (uint64_t *)link;
 
 	RTE_BUILD_BUG_ON(sizeof(*link) != sizeof(uint64_t));
 
-	*dst = __atomic_load_n(src, __ATOMIC_SEQ_CST);
+	*dst = rte_atomic_load_explicit(src, rte_memory_order_seq_cst);
 }
 
 /**
diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c
index 7cc7f28..82e2568 100644
--- a/lib/ethdev/ethdev_private.c
+++ b/lib/ethdev/ethdev_private.c
@@ -245,7 +245,7 @@ struct dummy_queue {
 void
 eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
 {
-	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
+	static RTE_ATOMIC(void *) dummy_data[RTE_MAX_QUEUES_PER_PORT];
 	uintptr_t port_id = fpo - rte_eth_fp_ops;
 
 	per_port_queues[port_id].rx_warn_once = false;
@@ -278,10 +278,10 @@ struct dummy_queue {
 	fpo->recycle_rx_descriptors_refill = dev->recycle_rx_descriptors_refill;
 
 	fpo->rxq.data = dev->data->rx_queues;
-	fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;
+	fpo->rxq.clbk = (void * __rte_atomic *)(uintptr_t)dev->post_rx_burst_cbs;
 
 	fpo->txq.data = dev->data->tx_queues;
-	fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
+	fpo->txq.clbk = (void * __rte_atomic *)(uintptr_t)dev->pre_tx_burst_cbs;
 }
 
 uint16_t
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9dabcb5..af23ac0 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -5654,9 +5654,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 		/* Stores to cb->fn and cb->param should complete before
 		 * cb is visible to data plane.
 		 */
-		__atomic_store_n(
+		rte_atomic_store_explicit(
 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
-			cb, __ATOMIC_RELEASE);
+			cb, rte_memory_order_release);
 
 	} else {
 		while (tail->next)
@@ -5664,7 +5664,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 		/* Stores to cb->fn and cb->param should complete before
 		 * cb is visible to data plane.
 		 */
-		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
 	}
 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
 
@@ -5704,9 +5704,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 	/* Stores to cb->fn, cb->param and cb->next should complete before
 	 * cb is visible to data plane threads.
 	 */
-	__atomic_store_n(
+	rte_atomic_store_explicit(
 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
-		cb, __ATOMIC_RELEASE);
+		cb, rte_memory_order_release);
 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
 
 	rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param,
@@ -5757,9 +5757,9 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 		/* Stores to cb->fn and cb->param should complete before
 		 * cb is visible to data plane.
 		 */
-		__atomic_store_n(
+		rte_atomic_store_explicit(
 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
-			cb, __ATOMIC_RELEASE);
+			cb, rte_memory_order_release);
 
 	} else {
 		while (tail->next)
@@ -5767,7 +5767,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 		/* Stores to cb->fn and cb->param should complete before
 		 * cb is visible to data plane.
 		 */
-		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
+		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
 	}
 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
 
@@ -5791,7 +5791,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	struct rte_eth_rxtx_callback *cb;
-	struct rte_eth_rxtx_callback **prev_cb;
+	RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
 	int ret = -EINVAL;
 
 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
@@ -5800,7 +5800,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 		cb = *prev_cb;
 		if (cb == user_cb) {
 			/* Remove the user cb from the callback list. */
-			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
 			ret = 0;
 			break;
 		}
@@ -5828,7 +5828,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	int ret = -EINVAL;
 	struct rte_eth_rxtx_callback *cb;
-	struct rte_eth_rxtx_callback **prev_cb;
+	RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
 
 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
@@ -5836,7 +5836,7 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
 		cb = *prev_cb;
 		if (cb == user_cb) {
 			/* Remove the user cb from the callback list. */
-			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
+			rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
 			ret = 0;
 			break;
 		}
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index f949dfc..ec48b24 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -6018,14 +6018,14 @@ uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
 {
 	void *cb;
 
-	/* __ATOMIC_RELEASE memory order was used when the
+	/* rte_memory_order_release memory order was used when the
 	 * call back was inserted into the list.
 	 * Since there is a clear dependency between loading
-	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+	 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
 	 * not required.
 	 */
-	cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id],
-		__ATOMIC_RELAXED);
+	cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
+		rte_memory_order_relaxed);
 	if (unlikely(cb != NULL))
 		nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
 				rx_pkts, nb_rx, nb_pkts, cb);
@@ -6355,14 +6355,14 @@ uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
 {
 	void *cb;
 
-	/* __ATOMIC_RELEASE memory order was used when the
+	/* rte_memory_order_release memory order was used when the
 	 * call back was inserted into the list.
 	 * Since there is a clear dependency between loading
-	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+	 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
 	 * not required.
 	 */
-	cb = __atomic_load_n((void **)&p->txq.clbk[queue_id],
-		__ATOMIC_RELAXED);
+	cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
+		rte_memory_order_relaxed);
 	if (unlikely(cb != NULL))
 		nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
 				tx_pkts, nb_pkts, cb);
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index 32f5f73..4bfaf79 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -71,7 +71,7 @@ struct rte_ethdev_qdata {
 	/** points to array of internal queue data pointers */
 	void **data;
 	/** points to array of queue callback data pointers */
-	void **clbk;
+	RTE_ATOMIC(void *) *clbk;
 };
 
 /**
-- 
1.8.3.1
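P.S. for readers following the ordering argument in the rte_ethdev.h
hunks above: the callback list relies on publish-with-release paired
with an address-dependent relaxed load. A generic sketch of that
pattern using the same rte_stdatomic.h names; the node type and the
function names here are illustrative, not code from this series.

    #include <stddef.h>
    #include <rte_stdatomic.h>

    struct node {                       /* hypothetical callback node */
        void (*fn)(void);
        RTE_ATOMIC(struct node *) next;
    };

    static RTE_ATOMIC(struct node *) head;

    /* Writer: initialize n->fn/n->next first; the release store keeps
     * those writes visible before n itself becomes reachable.
     */
    static void
    publish(struct node *n)
    {
        rte_atomic_store_explicit(&head, n, rte_memory_order_release);
    }

    /* Reader: relaxed is sufficient because reading n->fn depends on
     * the address just loaded, as the comments in the patch explain.
     */
    static void
    call_first(void)
    {
        struct node *n = rte_atomic_load_explicit(&head,
            rte_memory_order_relaxed);
        if (n != NULL)
            n->fn();
    }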