From mboxrd@z Thu Jan  1 00:00:00 1970
From: Andre Muezerie <andremue@linux.microsoft.com>
To: Bruce Richardson, Vladimir Medvedkin
Cc: dev@dpdk.org, Andre Muezerie
Subject: [PATCH] lib/lpm: use standard atomic_store_explicit
Date: Tue, 3 Dec 2024 18:20:01 -0800
Message-Id: <1733278801-19296-1-git-send-email-andremue@linux.microsoft.com>

MSVC issues the diagnostics below:

../lib/lpm/rte_lpm.c(297): warning C4013: '__atomic_store' undefined; assuming extern returning int
../lib/lpm/rte_lpm.c(298): error C2065: '__ATOMIC_RELAXED': undeclared identifier

The fix is to use the standard atomic_store_explicit() (through DPDK's
rte_atomic_store_explicit() macro) instead of the GCC-specific
__atomic_store(). atomic_store_explicit() is already used in other
parts of DPDK and is supported by many compilers, including MSVC.

To make a whole table entry storable through this API, struct
rte_lpm_tbl_entry gains an anonymous union that overlays its bit-fields
with a single RTE_ATOMIC(uint32_t) "val" member, and a static_assert
checks that the entry remains 32 bits wide.

Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
---
 lib/lpm/rte_lpm.c | 108 ++++++++++++++++++++++++++++++----------------
 lib/lpm/rte_lpm.h |  56 ++++++++++++++----------
 2 files changed, 104 insertions(+), 60 deletions(-)

diff --git a/lib/lpm/rte_lpm.c b/lib/lpm/rte_lpm.c
index a5c9e7c9fc..7ec85f1718 100644
--- a/lib/lpm/rte_lpm.c
+++ b/lib/lpm/rte_lpm.c
@@ -294,8 +294,8 @@ __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
     RTE_SET_USED(n);

     /* Set tbl8 group invalid */
-    __atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
-        __ATOMIC_RELAXED);
+    rte_atomic_store_explicit(&tbl8[tbl8_group_index].val,
+        zero_tbl8_entry.val, rte_memory_order_relaxed);
 }

 /* Associate QSBR variable with an LPM object.
@@ -515,8 +515,8 @@ _tbl8_alloc(struct __rte_lpm *i_lpm)
                     RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                     sizeof(tbl8_entry[0]));

-            __atomic_store(tbl8_entry, &new_tbl8_entry,
-                    __ATOMIC_RELAXED);
+            rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+                    rte_memory_order_relaxed);

             /* Return group index for allocated tbl8 group. */
             return group_idx;
@@ -551,15 +551,19 @@ tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)

     if (i_lpm->v == NULL) {
         /* Set tbl8 group invalid*/
-        __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
-                __ATOMIC_RELAXED);
+        struct rte_lpm_tbl_entry *tbl8_entry =
+            &i_lpm->lpm.tbl8[tbl8_group_start];
+        rte_atomic_store_explicit(&tbl8_entry->val, zero_tbl8_entry.val,
+            rte_memory_order_relaxed);
     } else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
         /* Wait for quiescent state change. */
         rte_rcu_qsbr_synchronize(i_lpm->v,
                 RTE_QSBR_THRID_INVALID);
         /* Set tbl8 group invalid*/
-        __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
-                __ATOMIC_RELAXED);
+        struct rte_lpm_tbl_entry *tbl8_entry =
+            &i_lpm->lpm.tbl8[tbl8_group_start];
+        rte_atomic_store_explicit(&tbl8_entry->val, zero_tbl8_entry.val,
+            rte_memory_order_relaxed);
     } else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
         /* Push into QSBR defer queue. */
         status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
@@ -602,8 +606,10 @@ add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
             /* Setting tbl24 entry in one go to avoid race
              * conditions
              */
-            __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
-                    __ATOMIC_RELEASE);
+            struct rte_lpm_tbl_entry *tbl24_entry =
+                &i_lpm->lpm.tbl24[i];
+            rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+                rte_memory_order_release);

             continue;
         }
@@ -632,9 +638,11 @@ add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
                  * Setting tbl8 entry in one go to avoid
                  * race conditions
                  */
-                __atomic_store(&i_lpm->lpm.tbl8[j],
-                    &new_tbl8_entry,
-                    __ATOMIC_RELAXED);
+                struct rte_lpm_tbl_entry *tbl8_entry =
+                    &i_lpm->lpm.tbl8[j];
+                rte_atomic_store_explicit(&tbl8_entry->val,
+                    new_tbl8_entry.val,
+                    rte_memory_order_relaxed);

                 continue;
             }
@@ -679,8 +687,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
             .valid_group = i_lpm->lpm.tbl8[i].valid_group,
             .next_hop = next_hop,
         };
-        __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                __ATOMIC_RELAXED);
+        struct rte_lpm_tbl_entry *tbl8_entry =
+            &i_lpm->lpm.tbl8[i];
+        rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+            rte_memory_order_relaxed);
     }

     /*
@@ -699,8 +709,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
         /* The tbl24 entry must be written only after the
          * tbl8 entries are written.
          */
-        __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
-                __ATOMIC_RELEASE);
+        struct rte_lpm_tbl_entry *tbl24_entry =
+            &i_lpm->lpm.tbl24[tbl24_index];
+        rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+            rte_memory_order_release);

     } /* If valid entry but not extended calculate the index into Table8. */
     else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
@@ -724,8 +736,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
                 .valid_group = i_lpm->lpm.tbl8[i].valid_group,
                 .next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
             };
-            __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                    __ATOMIC_RELAXED);
+            struct rte_lpm_tbl_entry *tbl8_entry =
+                &i_lpm->lpm.tbl8[i];
+            rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+                rte_memory_order_relaxed);
         }

         tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
@@ -738,8 +752,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
                 .valid_group = i_lpm->lpm.tbl8[i].valid_group,
                 .next_hop = next_hop,
             };
-            __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                    __ATOMIC_RELAXED);
+            struct rte_lpm_tbl_entry *tbl8_entry =
+                &i_lpm->lpm.tbl8[i];
+            rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+                rte_memory_order_relaxed);
         }

         /*
@@ -758,8 +774,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
         /* The tbl24 entry must be written only after the
          * tbl8 entries are written.
          */
-        __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
-                __ATOMIC_RELEASE);
+        struct rte_lpm_tbl_entry *tbl24_entry =
+            &i_lpm->lpm.tbl24[tbl24_index];
+        rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+            rte_memory_order_release);

     } else { /*
         * If it is valid, extended entry calculate the index into tbl8.
@@ -784,8 +802,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
              * Setting tbl8 entry in one go to avoid race
              * condition
              */
-            __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                    __ATOMIC_RELAXED);
+            struct rte_lpm_tbl_entry *tbl8_entry =
+                &i_lpm->lpm.tbl8[i];
+            rte_atomic_store_explicit(&tbl8_entry->val,
+                new_tbl8_entry.val, rte_memory_order_relaxed);

             continue;
         }
@@ -924,8 +944,10 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,

             if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
                     i_lpm->lpm.tbl24[i].depth <= depth) {
-                __atomic_store(&i_lpm->lpm.tbl24[i],
-                    &zero_tbl24_entry, __ATOMIC_RELEASE);
+                struct rte_lpm_tbl_entry *tbl24_entry =
+                    &i_lpm->lpm.tbl24[i];
+                rte_atomic_store_explicit(&tbl24_entry->val,
+                    zero_tbl24_entry.val, rte_memory_order_release);
             } else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
                 /*
                  * If TBL24 entry is extended, then there has
@@ -970,8 +992,10 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,

             if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
                     i_lpm->lpm.tbl24[i].depth <= depth) {
-                __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
-                        __ATOMIC_RELEASE);
+                struct rte_lpm_tbl_entry *tbl24_entry =
+                    &i_lpm->lpm.tbl24[i];
+                rte_atomic_store_explicit(&tbl24_entry->val,
+                    new_tbl24_entry.val, rte_memory_order_release);
             } else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
                 /*
                  * If TBL24 entry is extended, then there has
@@ -986,10 +1010,13 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
                 for (j = tbl8_index; j < (tbl8_index +
                         RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

-                    if (i_lpm->lpm.tbl8[j].depth <= depth)
-                        __atomic_store(&i_lpm->lpm.tbl8[j],
-                            &new_tbl8_entry,
-                            __ATOMIC_RELAXED);
+                    if (i_lpm->lpm.tbl8[j].depth <= depth) {
+                        struct rte_lpm_tbl_entry *tbl8_entry =
+                            &i_lpm->lpm.tbl8[j];
+                        rte_atomic_store_explicit(&tbl8_entry->val,
+                            new_tbl8_entry.val,
+                            rte_memory_order_relaxed);
+                    }
                 }
             }
         }
@@ -1097,9 +1124,12 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
          * rule_to_delete must be modified.
          */
         for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
-            if (i_lpm->lpm.tbl8[i].depth <= depth)
-                __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
-                        __ATOMIC_RELAXED);
+            if (i_lpm->lpm.tbl8[i].depth <= depth) {
+                struct rte_lpm_tbl_entry *tbl8_entry =
+                    &i_lpm->lpm.tbl8[i];
+                rte_atomic_store_explicit(&tbl8_entry->val,
+                    new_tbl8_entry.val, rte_memory_order_relaxed);
+            }
         }
     }

@@ -1130,8 +1160,10 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
         /* Set tbl24 before freeing tbl8 to avoid race condition.
          * Prevent the free of the tbl8 group from hoisting.
          */
-        __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
-                __ATOMIC_RELAXED);
+        struct rte_lpm_tbl_entry *tbl24_entry =
+            &i_lpm->lpm.tbl24[tbl24_index];
+        rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+            rte_memory_order_relaxed);
         rte_atomic_thread_fence(rte_memory_order_release);
         status = tbl8_free(i_lpm, tbl8_group_start);
     }
diff --git a/lib/lpm/rte_lpm.h b/lib/lpm/rte_lpm.h
index 329dc1aad4..c1f30f96e3 100644
--- a/lib/lpm/rte_lpm.h
+++ b/lib/lpm/rte_lpm.h
@@ -77,38 +77,50 @@ enum rte_lpm_qsbr_mode {
 /** @internal Tbl24 entry structure. */
 __extension__
 struct rte_lpm_tbl_entry {
-    /**
-     * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
-     * a group index pointing to a tbl8 structure (tbl24 only, when
-     * valid_group is set)
-     */
-    uint32_t next_hop :24;
-    /* Using single uint8_t to store 3 values. */
-    uint32_t valid :1; /**< Validation flag. */
-    /**
-     * For tbl24:
-     *  - valid_group == 0: entry stores a next hop
-     *  - valid_group == 1: entry stores a group_index pointing to a tbl8
-     * For tbl8:
-     *  - valid_group indicates whether the current tbl8 is in use or not
-     */
-    uint32_t valid_group :1;
-    uint32_t depth :6; /**< Rule depth. */
+    union {
+        RTE_ATOMIC(uint32_t) val;
+        struct {
+            /**
+             * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
+             * a group index pointing to a tbl8 structure (tbl24 only, when
+             * valid_group is set)
+             */
+            uint32_t next_hop :24;
+            /* Using single uint8_t to store 3 values. */
+            uint32_t valid :1; /**< Validation flag. */
+            /**
+             * For tbl24:
+             *  - valid_group == 0: entry stores a next hop
+             *  - valid_group == 1: entry stores a group_index pointing to a tbl8
+             * For tbl8:
+             *  - valid_group indicates whether the current tbl8 is in use or not
+             */
+            uint32_t valid_group :1;
+            uint32_t depth :6; /**< Rule depth. */
+        };
+    };
 };

 #else

 __extension__
 struct rte_lpm_tbl_entry {
-    uint32_t depth :6;
-    uint32_t valid_group :1;
-    uint32_t valid :1;
-    uint32_t next_hop :24;
-
+    union {
+        RTE_ATOMIC(uint32_t) val;
+        struct {
+            uint32_t depth :6;
+            uint32_t valid_group :1;
+            uint32_t valid :1;
+            uint32_t next_hop :24;
+        };
+    };
 };

 #endif

+static_assert(sizeof(struct rte_lpm_tbl_entry) == sizeof(uint32_t),
+    "sizeof(struct rte_lpm_tbl_entry) == sizeof(uint32_t)");
+
 /** LPM configuration structure. */
 struct rte_lpm_config {
     uint32_t max_rules; /**< Max number of rules. */
-- 
2.47.0.vfs.0.3
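
A note for readers less familiar with the pattern used above: the patch
overlays the entry's bit-fields with a single 32-bit atomic member so that
a whole table entry can be written in one shot. The standalone sketch below
is not part of the patch; it shows the same idea in plain C11 <stdatomic.h>,
which DPDK's RTE_ATOMIC()/rte_atomic_store_explicit() map onto when the
build uses standard atomics. The names tbl_entry and table are made up for
the example.

    /* sketch.c - illustrative only; build with: cc -std=c11 sketch.c */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct rte_lpm_tbl_entry. */
    struct tbl_entry {
        union {
            _Atomic uint32_t val;   /* whole-entry atomic view */
            struct {
                uint32_t next_hop :24;
                uint32_t valid :1;
                uint32_t valid_group :1;
                uint32_t depth :6;
            };
        };
    };

    /* Mirrors the static_assert added by the patch. */
    _Static_assert(sizeof(struct tbl_entry) == sizeof(uint32_t),
        "entry must stay 32 bits wide");

    int main(void)
    {
        struct tbl_entry table[16] = {0};
        struct tbl_entry new_entry = { .next_hop = 42, .valid = 1, .depth = 24 };

        /* Old style (GCC/Clang builtin, rejected by MSVC):
         *     __atomic_store(&table[0], &new_entry, __ATOMIC_RELAXED);
         * Portable form: store the 32-bit overlay in one atomic operation.
         */
        atomic_store_explicit(&table[0].val, new_entry.val, memory_order_relaxed);

        /* Reading the bit-fields back relies on the union member overlay,
         * just as readers of rte_lpm_tbl_entry do after this patch.
         */
        printf("next_hop=%u valid=%u depth=%u\n",
            table[0].next_hop, table[0].valid, table[0].depth);
        return 0;
    }

Keeping the atomic member and the bit-fields in one union means code that
reads individual fields stays unchanged, while writers get a single 32-bit
object they can hand to the atomic store.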