From mboxrd@z Thu Jan 1 00:00:00 1970
From: Tyler Retzlaff
To: dev@dpdk.org
Cc: Honnappa.Nagarahalli@arm.com, thomas@monjalon.net, Tyler Retzlaff
Subject: [PATCH 13/17] drivers/event: use previous value atomic fetch operations
Date: Wed, 1 Mar 2023 16:47:44 -0800
Message-Id: <1677718068-2412-14-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1677718068-2412-1-git-send-email-roretzla@linux.microsoft.com>
References: <1677718068-2412-1-git-send-email-roretzla@linux.microsoft.com>

Use __atomic_fetch_{add,and,or,sub,xor} instead of
__atomic_{add,and,or,sub,xor}_fetch when there is no interest in the
result of the operation.

This reduces unnecessary code generation that produced the result of
the atomic operation only for it to go unused.

The change also brings closer alignment with the atomics available in
the C11 standard and will reduce review effort when they are
integrated.
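As an illustration of the distinction (a minimal standalone sketch, not
part of the patch; the counter and function names below are
hypothetical), both builtins perform the same atomic read-modify-write
and differ only in which value they return, so when the return value is
discarded the fetch-first form is a drop-in replacement and corresponds
to C11 atomic_fetch_sub_explicit():

#include <stdint.h>

/* Hypothetical counter, used only to show the two builtin forms. */
static uint32_t total_events;

static void
event_done(void)
{
	/* Old form: returns the value *after* the subtraction. */
	(void)__atomic_sub_fetch(&total_events, 1, __ATOMIC_RELAXED);

	/* New form: returns the value *before* the subtraction. With the
	 * result discarded both calls have the same effect, but (per the
	 * commit message) this form avoids code generated only to compute
	 * the unused new value, and it maps directly onto the C11
	 * atomic_fetch_sub_explicit(&c, 1, memory_order_relaxed) call.
	 */
	(void)__atomic_fetch_sub(&total_events, 1, __ATOMIC_RELAXED);
}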
Signed-off-by: Tyler Retzlaff
---
 drivers/event/cnxk/cnxk_eventdev_selftest.c | 12 ++++++------
 drivers/event/cnxk/cnxk_tim_worker.h        |  6 +++---
 drivers/event/dsw/dsw_event.c               |  6 +++---
 drivers/event/octeontx/timvf_worker.h       |  6 +++---
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/event/cnxk/cnxk_eventdev_selftest.c b/drivers/event/cnxk/cnxk_eventdev_selftest.c
index 577c99b..95c0f1b 100644
--- a/drivers/event/cnxk/cnxk_eventdev_selftest.c
+++ b/drivers/event/cnxk/cnxk_eventdev_selftest.c
@@ -554,7 +554,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
 		ret = validate_event(&ev);
 		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
 		rte_pktmbuf_free(ev.mbuf);
-		__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
 	}

 	return 0;
@@ -916,7 +916,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
 		if (seqn_list_update(seqn) == 0) {
 			rte_pktmbuf_free(ev.mbuf);
-			__atomic_sub_fetch(total_events, 1,
+			__atomic_fetch_sub(total_events, 1,
					   __ATOMIC_RELAXED);
 		} else {
 			plt_err("Failed to update seqn_list");
@@ -1072,7 +1072,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
 		if (seqn_list_update(seqn) == 0) {
 			rte_pktmbuf_free(ev.mbuf);
-			__atomic_sub_fetch(total_events, 1,
+			__atomic_fetch_sub(total_events, 1,
					   __ATOMIC_RELAXED);
 		} else {
 			plt_err("Failed to update seqn_list");
@@ -1217,7 +1217,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
 	if (ev.sub_event_type == MAX_STAGES) { /* last stage */
 		rte_pktmbuf_free(ev.mbuf);
-		__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
 	} else {
 		ev.event_type = RTE_EVENT_TYPE_CPU;
 		ev.sub_event_type++;
@@ -1293,7 +1293,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
 	if (ev.queue_id == nr_queues - 1) { /* last stage */
 		rte_pktmbuf_free(ev.mbuf);
-		__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
 	} else {
 		ev.event_type = RTE_EVENT_TYPE_CPU;
 		ev.queue_id++;
@@ -1338,7 +1338,7 @@ typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
 	if (ev.queue_id == nr_queues - 1) { /* Last stage */
 		rte_pktmbuf_free(ev.mbuf);
-		__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+		__atomic_fetch_sub(total_events, 1, __ATOMIC_RELAXED);
 	} else {
 		ev.event_type = RTE_EVENT_TYPE_CPU;
 		ev.queue_id++;
diff --git a/drivers/event/cnxk/cnxk_tim_worker.h b/drivers/event/cnxk/cnxk_tim_worker.h
index a326d55..c087fe5 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.h
+++ b/drivers/event/cnxk/cnxk_tim_worker.h
@@ -102,19 +102,19 @@
 static inline void
 cnxk_tim_bkt_inc_nent(struct cnxk_tim_bkt *bktp)
 {
-	__atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
 }

 static inline void
 cnxk_tim_bkt_add_nent_relaxed(struct cnxk_tim_bkt *bktp, uint32_t v)
 {
-	__atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
 }

 static inline void
 cnxk_tim_bkt_add_nent(struct cnxk_tim_bkt *bktp, uint32_t v)
 {
-	__atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELEASE);
+	__atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELEASE);
 }

 static inline uint64_t
diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c
index 9932caf..89badca 100644
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -50,7 +50,7 @@
 	if (unlikely(new_total_on_loan > dsw->max_inflight)) {
 		/* Some other port took the last credits */
-		__atomic_sub_fetch(&dsw->credits_on_loan, acquired_credits,
+		__atomic_fetch_sub(&dsw->credits_on_loan, acquired_credits,
				   __ATOMIC_RELAXED);
 		return false;
 	}
@@ -77,7 +77,7 @@
 	port->inflight_credits = leave_credits;

-	__atomic_sub_fetch(&dsw->credits_on_loan, return_credits,
+	__atomic_fetch_sub(&dsw->credits_on_loan, return_credits,
			   __ATOMIC_RELAXED);

 	DSW_LOG_DP_PORT(DEBUG, port->id,
@@ -527,7 +527,7 @@ struct dsw_queue_flow_burst {
 	target_qfs[*targets_len] = *candidate_qf;
 	(*targets_len)++;

-	__atomic_add_fetch(&dsw->ports[candidate_port_id].immigration_load,
+	__atomic_fetch_add(&dsw->ports[candidate_port_id].immigration_load,
			   candidate_flow_load, __ATOMIC_RELAXED);

 	return true;
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index 3f1e77f..ad98ca0 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -108,7 +108,7 @@
 static inline void
 timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
 {
-	__atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+	__atomic_fetch_add(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
 }

 static inline uint32_t
@@ -121,13 +121,13 @@
 static inline void
 timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
 {
-	__atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
 }

 static inline void
 timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
 {
-	__atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&bktp->nb_entry, v, __ATOMIC_RELAXED);
 }

 static inline uint64_t
-- 
1.8.3.1