From: rsanford2@gmail.com
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 3/3] timer: fix race condition in rte_timer_manage()
Date: Thu, 23 Jul 2015 18:42:27 -0400
Message-ID: <1437691347-58708-4-git-send-email-rsanford2@gmail.com>
In-Reply-To: <1437691347-58708-1-git-send-email-rsanford2@gmail.com>
From: Robert Sanford <rsanford@akamai.com>
Signed-off-by: Robert Sanford <rsanford@akamai.com>
---
lib/librte_timer/rte_timer.c | 45 +++++++++++++++++++++++++++--------------
1 files changed, 29 insertions(+), 16 deletions(-)
diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 8e9243a..51e6038 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -504,6 +504,7 @@ void rte_timer_manage(void)
{
union rte_timer_status status;
struct rte_timer *tim, *next_tim;
+ struct rte_timer *run_first_tim, **pprev;
unsigned lcore_id = rte_lcore_id();
struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
uint64_t cur_time;
@@ -531,8 +532,10 @@ void rte_timer_manage(void)
/* if nothing to do just unlock and return */
if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
- priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time)
- goto done;
+ priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
+ rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+ return;
+ }
/* save start of list of expired timers */
tim = priv_timer[lcore_id].pending_head.sl_next[0];
@@ -546,24 +549,40 @@ void rte_timer_manage(void)
prev[i] ->sl_next[i] = NULL;
}
- /* now scan expired list and call callbacks */
+ /* transition run-list from PENDING to RUNNING */
+ run_first_tim = tim;
+ pprev = &run_first_tim;
+
for ( ; tim != NULL; tim = next_tim) {
next_tim = tim->sl_next[0];
ret = timer_set_running_state(tim);
+ if (likely(ret == 0)) {
+ pprev = &tim->sl_next[0];
+ } else {
+ /* another core is trying to re-config this one,
+ * remove it from local expired list and put it
+ * back on the priv_timer[] skip list */
+ *pprev = next_tim;
+ timer_add(tim, lcore_id, 1);
+ }
+ }
- /* this timer was not pending, continue */
- if (ret < 0)
- continue;
+ /* update the next to expire timer value */
+ priv_timer[lcore_id].pending_head.expire =
+ (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
+ priv_timer[lcore_id].pending_head.sl_next[0]->expire;
- rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+ rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+ /* now scan expired list and call callbacks */
+ for (tim = run_first_tim; tim != NULL; tim = next_tim) {
+ next_tim = tim->sl_next[0];
priv_timer[lcore_id].updated = 0;
/* execute callback function with list unlocked */
tim->f(tim, tim->arg);
- rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
__TIMER_STAT_ADD(pending, -1);
/* the timer was stopped or reloaded by the callback
* function, we have nothing to do here */
@@ -579,6 +598,7 @@ void rte_timer_manage(void)
}
else {
/* keep it in list and mark timer as pending */
+ rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
status.state = RTE_TIMER_PENDING;
__TIMER_STAT_ADD(pending, 1);
status.owner = (int16_t)lcore_id;
@@ -586,16 +606,9 @@ void rte_timer_manage(void)
tim->status.u32 = status.u32;
__rte_timer_reset(tim, cur_time + tim->period,
tim->period, lcore_id, tim->f, tim->arg, 1);
+ rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
}
}
-
- /* update the next to expire timer value */
- priv_timer[lcore_id].pending_head.expire =
- (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
- priv_timer[lcore_id].pending_head.sl_next[0]->expire;
-done:
- /* job finished, unlock the list lock */
- rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
}
/* dump statistics about timers */
--
1.7.1
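
For reference, below is a minimal standalone sketch of the locking pattern the patch adopts in rte_timer_manage(): while the list lock is held, detach the expired timers into a local run list and weed out any timer that another core is reconfiguring; then release the lock and invoke the callbacks unlocked. This is not the DPDK code -- the per-lcore skiplist, status words, timer_set_running_state() and timer_add() are replaced here by a mutex-protected singly linked list and a boolean flag, the names (struct tim, being_reconfigured, manage()) are invented for illustration, and periodic reinsertion is omitted.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tim {
	struct tim *next;
	uint64_t expire;
	bool being_reconfigured;	/* stand-in for a failed PENDING->RUNNING transition */
	void (*cb)(struct tim *);
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tim *pending_head;	/* sorted by expire, earliest first */

static void manage(uint64_t cur_time)
{
	struct tim *run_first, **pprev;
	struct tim *t, *next;

	pthread_mutex_lock(&list_lock);

	/* nothing expired: unlock and return (the patch makes this explicit) */
	if (pending_head == NULL || pending_head->expire > cur_time) {
		pthread_mutex_unlock(&list_lock);
		return;
	}

	/* detach the expired prefix of the pending list into a local run list */
	run_first = pending_head;
	pprev = &run_first;
	for (t = pending_head; t != NULL && t->expire <= cur_time; t = t->next)
		pprev = &t->next;
	pending_head = *pprev;		/* unexpired tail stays pending */
	*pprev = NULL;

	/* drop timers that another core is reconfiguring from the run list */
	pprev = &run_first;
	for (t = run_first; t != NULL; t = next) {
		next = t->next;
		if (t->being_reconfigured) {
			*pprev = next;		/* unlink from the run list */
			t->next = pending_head;	/* hand it back to the pending list
						 * (the real code re-adds it in sorted
						 * order via timer_add()) */
			pending_head = t;
		} else {
			pprev = &t->next;
		}
	}

	pthread_mutex_unlock(&list_lock);

	/* run the callbacks with the list lock released */
	for (t = run_first; t != NULL; t = next) {
		next = t->next;
		t->cb(t);
	}
}

static void say(struct tim *t)
{
	printf("fired timer with expire=%llu\n", (unsigned long long)t->expire);
}

int main(void)
{
	struct tim a = { .expire = 5, .cb = say };
	struct tim b = { .expire = 9, .being_reconfigured = true, .cb = say };
	struct tim c = { .expire = 20, .cb = say };

	a.next = &b;
	b.next = &c;
	pending_head = &a;

	manage(10);	/* fires a; b goes back to pending; c never left it */
	return 0;
}

The patch itself goes one step further for periodic timers: when a callback leaves the timer pending, the list lock is re-taken only around the status update and the __rte_timer_reset() call, rather than being held across the whole callback loop.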
Thread overview: 12+ messages
2015-07-23 22:42 [dpdk-dev] [PATCH 0/3] timer: fix rte_timer_manage and improve unit tests rsanford2
2015-07-23 22:42 ` [dpdk-dev] [PATCH 1/3] timer: fix stress test 2 synchronization bug rsanford2
2015-07-23 22:42 ` [dpdk-dev] [PATCH 2/3] timer: add timer-manage race condition test rsanford2
2015-07-23 22:42 ` rsanford2 [this message]
2015-07-26 14:11 ` [dpdk-dev] [PATCH 0/3] timer: fix rte_timer_manage and improve unit tests Thomas Monjalon
2015-07-27 15:46 ` Sanford, Robert
2015-07-27 15:53 ` Thomas Monjalon
2015-07-27 22:46 ` [dpdk-dev] [PATCH v2 " rsanford2
2015-08-02 22:06 ` Thomas Monjalon
2015-07-27 22:46 ` [dpdk-dev] [PATCH v2 1/3] timer: fix stress test 2 synchronization bug rsanford2
2015-07-27 22:46 ` [dpdk-dev] [PATCH v2 2/3] timer: add timer-manage race condition test rsanford2
2015-07-27 22:46 ` [dpdk-dev] [PATCH v2 3/3] timer: fix race condition in rte_timer_manage() rsanford2