From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v3 2/4] event/octeontx2: optimize timer arm routine
Date: Tue, 23 Mar 2021 14:14:36 +0530 [thread overview]
Message-ID: <20210323084439.3898-2-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20210323084439.3898-1-pbhagavatula@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Use relaxed load-exclusive (LDXR) in place of acquire load-exclusive
(LDAXR) when polling for other threads or hardware to complete, and
rely on an explicit acquire fence once the wait is over.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
v3 Changes:
- Fix incorrect asm register usage detected by clang.
drivers/event/octeontx2/otx2_tim_worker.c | 1 +
drivers/event/octeontx2/otx2_tim_worker.h | 163 ++++++++++++----------
2 files changed, 90 insertions(+), 74 deletions(-)
diff --git a/drivers/event/octeontx2/otx2_tim_worker.c b/drivers/event/octeontx2/otx2_tim_worker.c
index eb901844d..6a3511ec0 100644
--- a/drivers/event/octeontx2/otx2_tim_worker.c
+++ b/drivers/event/octeontx2/otx2_tim_worker.c
@@ -170,6 +170,7 @@ otx2_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
int ret;
RTE_SET_USED(adptr);
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
for (index = 0; index < nb_timers; index++) {
if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
rte_errno = EALREADY;
diff --git a/drivers/event/octeontx2/otx2_tim_worker.h b/drivers/event/octeontx2/otx2_tim_worker.h
index f03912b81..5ece8fd05 100644
--- a/drivers/event/octeontx2/otx2_tim_worker.h
+++ b/drivers/event/octeontx2/otx2_tim_worker.h
@@ -84,7 +84,13 @@ tim_bkt_inc_lock(struct otx2_tim_bkt *bktp)
static inline void
tim_bkt_dec_lock(struct otx2_tim_bkt *bktp)
{
- __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_RELEASE);
+ __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELEASE);
+}
+
+static inline void
+tim_bkt_dec_lock_relaxed(struct otx2_tim_bkt *bktp)
+{
+ __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELAXED);
}
static inline uint32_t
@@ -246,22 +252,20 @@ tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
if (tim_bkt_get_nent(lock_sema) != 0) {
uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
- asm volatile(
- " ldaxr %[hbt], [%[w1]] \n"
- " tbz %[hbt], 33, dne%= \n"
- " sevl \n"
- "rty%=: wfe \n"
- " ldaxr %[hbt], [%[w1]] \n"
- " tbnz %[hbt], 33, rty%= \n"
- "dne%=: \n"
- : [hbt] "=&r" (hbt_state)
- : [w1] "r" ((&bkt->w1))
- : "memory"
- );
+ asm volatile(" ldxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r"(hbt_state)
+ : [w1] "r"((&bkt->w1))
+ : "memory");
#else
do {
hbt_state = __atomic_load_n(&bkt->w1,
- __ATOMIC_ACQUIRE);
+ __ATOMIC_RELAXED);
} while (hbt_state & BIT_ULL(33));
#endif
@@ -282,10 +286,10 @@ tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
if (unlikely(chunk == NULL)) {
bkt->chunk_remainder = 0;
- tim_bkt_dec_lock(bkt);
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim->state = RTE_EVENT_TIMER_ERROR;
+ tim_bkt_dec_lock(bkt);
return -ENOMEM;
}
mirr_bkt->current_chunk = (uintptr_t)chunk;
@@ -298,12 +302,11 @@ tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
/* Copy work entry. */
*chunk = *pent;
- tim_bkt_inc_nent(bkt);
- tim_bkt_dec_lock(bkt);
-
tim->impl_opaque[0] = (uintptr_t)chunk;
tim->impl_opaque[1] = (uintptr_t)bkt;
- tim->state = RTE_EVENT_TIMER_ARMED;
+ __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+ tim_bkt_inc_nent(bkt);
+ tim_bkt_dec_lock_relaxed(bkt);
return 0;
}
@@ -331,22 +334,20 @@ tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
if (tim_bkt_get_nent(lock_sema) != 0) {
uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
- asm volatile(
- " ldaxr %[hbt], [%[w1]] \n"
- " tbz %[hbt], 33, dne%= \n"
- " sevl \n"
- "rty%=: wfe \n"
- " ldaxr %[hbt], [%[w1]] \n"
- " tbnz %[hbt], 33, rty%= \n"
- "dne%=: \n"
- : [hbt] "=&r" (hbt_state)
- : [w1] "r" ((&bkt->w1))
- : "memory"
- );
+ asm volatile(" ldxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r"(hbt_state)
+ : [w1] "r"((&bkt->w1))
+ : "memory");
#else
do {
hbt_state = __atomic_load_n(&bkt->w1,
- __ATOMIC_ACQUIRE);
+ __ATOMIC_RELAXED);
} while (hbt_state & BIT_ULL(33));
#endif
@@ -359,26 +360,24 @@ tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
rem = tim_bkt_fetch_rem(lock_sema);
if (rem < 0) {
+ tim_bkt_dec_lock(bkt);
#ifdef RTE_ARCH_ARM64
- asm volatile(
- " ldaxrh %w[rem], [%[crem]] \n"
- " tbz %w[rem], 15, dne%= \n"
- " sevl \n"
- "rty%=: wfe \n"
- " ldaxrh %w[rem], [%[crem]] \n"
- " tbnz %w[rem], 15, rty%= \n"
- "dne%=: \n"
- : [rem] "=&r" (rem)
- : [crem] "r" (&bkt->chunk_remainder)
- : "memory"
- );
+ uint64_t w1;
+ asm volatile(" ldxr %[w1], [%[crem]] \n"
+ " tbz %[w1], 63, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxr %[w1], [%[crem]] \n"
+ " tbnz %[w1], 63, rty%= \n"
+ "dne%=: \n"
+ : [w1] "=&r"(w1)
+ : [crem] "r"(&bkt->w1)
+ : "memory");
#else
- while (__atomic_load_n(&bkt->chunk_remainder,
- __ATOMIC_ACQUIRE) < 0)
+ while (__atomic_load_n((int64_t *)&bkt->w1, __ATOMIC_RELAXED) <
+ 0)
;
#endif
- /* Goto diff bucket. */
- tim_bkt_dec_lock(bkt);
goto __retry;
} else if (!rem) {
/* Only one thread can be here*/
@@ -388,18 +387,21 @@ tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
if (unlikely(chunk == NULL)) {
- tim_bkt_set_rem(bkt, 0);
- tim_bkt_dec_lock(bkt);
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim->state = RTE_EVENT_TIMER_ERROR;
+ tim_bkt_set_rem(bkt, 0);
+ tim_bkt_dec_lock(bkt);
return -ENOMEM;
}
*chunk = *pent;
- while (tim_bkt_fetch_lock(lock_sema) !=
- (-tim_bkt_fetch_rem(lock_sema)))
- lock_sema = __atomic_load_n(&bkt->w1, __ATOMIC_ACQUIRE);
-
+ if (tim_bkt_fetch_lock(lock_sema)) {
+ do {
+ lock_sema = __atomic_load_n(&bkt->w1,
+ __ATOMIC_RELAXED);
+ } while (tim_bkt_fetch_lock(lock_sema) - 1);
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ }
mirr_bkt->current_chunk = (uintptr_t)chunk;
__atomic_store_n(&bkt->chunk_remainder,
tim_ring->nb_chunk_slots - 1, __ATOMIC_RELEASE);
@@ -409,12 +411,11 @@ tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
*chunk = *pent;
}
- /* Copy work entry. */
- tim_bkt_inc_nent(bkt);
- tim_bkt_dec_lock(bkt);
tim->impl_opaque[0] = (uintptr_t)chunk;
tim->impl_opaque[1] = (uintptr_t)bkt;
- tim->state = RTE_EVENT_TIMER_ARMED;
+ __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+ tim_bkt_inc_nent(bkt);
+ tim_bkt_dec_lock_relaxed(bkt);
return 0;
}
@@ -463,6 +464,23 @@ tim_add_entry_brst(struct otx2_tim_ring * const tim_ring,
if (lock_cnt) {
tim_bkt_dec_lock(bkt);
+#ifdef RTE_ARCH_ARM64
+ asm volatile(" ldxrb %w[lock_cnt], [%[lock]] \n"
+ " tst %w[lock_cnt], 255 \n"
+ " beq dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxrb %w[lock_cnt], [%[lock]] \n"
+ " tst %w[lock_cnt], 255 \n"
+ " bne rty%= \n"
+ "dne%=: \n"
+ : [lock_cnt] "=&r"(lock_cnt)
+ : [lock] "r"(&bkt->lock)
+ : "memory");
+#else
+ while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED))
+ ;
+#endif
goto __retry;
}
@@ -471,22 +489,20 @@ tim_add_entry_brst(struct otx2_tim_ring * const tim_ring,
if (tim_bkt_get_nent(lock_sema) != 0) {
uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
- asm volatile(
- " ldaxr %[hbt], [%[w1]] \n"
- " tbz %[hbt], 33, dne%= \n"
- " sevl \n"
- "rty%=: wfe \n"
- " ldaxr %[hbt], [%[w1]] \n"
- " tbnz %[hbt], 33, rty%= \n"
- "dne%=: \n"
- : [hbt] "=&r" (hbt_state)
- : [w1] "r" ((&bkt->w1))
- : "memory"
- );
+ asm volatile(" ldxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r"(hbt_state)
+ : [w1] "r"((&bkt->w1))
+ : "memory");
#else
do {
hbt_state = __atomic_load_n(&bkt->w1,
- __ATOMIC_ACQUIRE);
+ __ATOMIC_RELAXED);
} while (hbt_state & BIT_ULL(33));
#endif
@@ -563,19 +579,18 @@ tim_rm_entry(struct rte_event_timer *tim)
bkt = (struct otx2_tim_bkt *)(uintptr_t)tim->impl_opaque[1];
lock_sema = tim_bkt_inc_lock(bkt);
if (tim_bkt_get_hbt(lock_sema) || !tim_bkt_get_nent(lock_sema)) {
- tim_bkt_dec_lock(bkt);
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
+ tim_bkt_dec_lock(bkt);
return -ENOENT;
}
entry->w0 = 0;
entry->wqe = 0;
- tim_bkt_dec_lock(bkt);
-
tim->state = RTE_EVENT_TIMER_CANCELED;
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
+ tim_bkt_dec_lock(bkt);
return 0;
}
--
2.17.1
next prev parent reply other threads:[~2021-03-23 8:44 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-02-25 12:23 [dpdk-dev] [PATCH 1/4] event/octeontx2: simplify timer bucket estimation pbhagavatula
2021-02-25 12:23 ` [dpdk-dev] [PATCH 2/4] event/octeontx2: optimize timer arm routine pbhagavatula
2021-02-25 12:23 ` [dpdk-dev] [PATCH 3/4] event/octeontx2: reduce chunk pool memory usage pbhagavatula
2021-03-20 13:30 ` Jerin Jacob
2021-02-25 12:23 ` [dpdk-dev] [PATCH 4/4] event/octeontx2: timer always use virtual counter pbhagavatula
2021-03-20 13:34 ` Jerin Jacob
2021-03-21 7:11 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2021-03-21 8:49 ` [dpdk-dev] [PATCH v2 1/4] event/octeontx2: simplify timer bucket estimation pbhagavatula
2021-03-21 8:49 ` [dpdk-dev] [PATCH v2 2/4] event/octeontx2: optimize timer arm routine pbhagavatula
2021-03-21 8:49 ` [dpdk-dev] [PATCH v2 3/4] event/octeontx2: reduce chunk pool memory usage pbhagavatula
2021-03-21 8:49 ` [dpdk-dev] [PATCH v2 4/4] event/octeontx2: timer always use virtual counter pbhagavatula
2021-03-22 16:13 ` Jerin Jacob
2021-03-23 8:44 ` [dpdk-dev] [PATCH v3 1/4] event/octeontx2: simplify timer bucket estimation pbhagavatula
2021-03-23 8:44 ` pbhagavatula [this message]
2021-03-23 8:44 ` [dpdk-dev] [PATCH v3 3/4] event/octeontx2: reduce chunk pool memory usage pbhagavatula
2021-03-23 8:44 ` [dpdk-dev] [PATCH v3 4/4] event/octeontx2: timer always use virtual counter pbhagavatula
2021-03-24 7:44 ` [dpdk-dev] [PATCH v3 1/4] event/octeontx2: simplify timer bucket estimation Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210323084439.3898-2-pbhagavatula@marvell.com \
--to=pbhagavatula@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).