DPDK patches and discussions
* [PATCH] eventdev/timer: fix overflow issue
@ 2023-01-24 16:09 Erik Gabriel Carrillo
  2023-01-24 20:45 ` [PATCH v2] " Erik Gabriel Carrillo
  2023-01-25  2:47 ` [PATCH] " Stephen Hemminger
  0 siblings, 2 replies; 9+ messages in thread
From: Erik Gabriel Carrillo @ 2023-01-24 16:09 UTC (permalink / raw)
  To: jerinj; +Cc: dev, stable

The software timer adapter converts event timer timeout ticks to a
number of CPU cycles at which an rte_timer should expire. The
computation uses integer operations that can result in overflow.

Use floating point operations instead to perform the computation, and
convert the final result back to an integer type when returning. Also
move the logic that checks the timeout range into the function that
performs the above computation.
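
To see the scale of the problem (the 2.5 GHz clock rate below is only an
assumed example, not a measured value): in the original expression

    timeout_ns * rte_get_timer_hz() / NSECPERSEC

the intermediate product exceeds UINT64_MAX once timeout_ns passes
UINT64_MAX / 2.5e9, i.e. roughly 7.4e9 ns, so any timeout longer than about
7.4 seconds silently wraps and produces a far-too-small cycle count.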

Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
Cc: stable@dpdk.org

Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
---
 lib/eventdev/rte_event_timer_adapter.c | 72 +++++++++++++-------------
 1 file changed, 35 insertions(+), 37 deletions(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index d357f7403a..dbfb91ec1c 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -719,13 +719,29 @@ swtim_callback(struct rte_timer *tim)
 	}
 }
 
-static __rte_always_inline uint64_t
+static __rte_always_inline int
 get_timeout_cycles(struct rte_event_timer *evtim,
-		   const struct rte_event_timer_adapter *adapter)
+		   const struct rte_event_timer_adapter *adapter,
+		   uint64_t *timeout_cycles)
 {
 	struct swtim *sw = swtim_pmd_priv(adapter);
-	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
-	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
+	uint64_t timeout_nsecs;
+	double cycles_per_nsec;
+
+	cycles_per_nsec = (double)rte_get_timer_hz() / NSECPERSEC;
+
+	timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
+	if (timeout_nsecs > sw->max_tmo_ns ||
+	    timeout_nsecs > UINT64_MAX / cycles_per_nsec)
+		return -1;
+	if (timeout_nsecs < sw->timer_tick_ns)
+		return -2;
+
+	if (timeout_cycles)
+		*timeout_cycles = (uint64_t)ceil(timeout_nsecs *
+						 cycles_per_nsec);
+
+	return 0;
 }
 
 /* This function returns true if one or more (adapter) ticks have occurred since
@@ -759,23 +775,6 @@ swtim_did_tick(struct swtim *sw)
 	return false;
 }
 
-/* Check that event timer timeout value is in range */
-static __rte_always_inline int
-check_timeout(struct rte_event_timer *evtim,
-	      const struct rte_event_timer_adapter *adapter)
-{
-	uint64_t tmo_nsec;
-	struct swtim *sw = swtim_pmd_priv(adapter);
-
-	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
-	if (tmo_nsec > sw->max_tmo_ns)
-		return -1;
-	if (tmo_nsec < sw->timer_tick_ns)
-		return -2;
-
-	return 0;
-}
-
 /* Check that event timer event queue sched type matches destination event queue
  * sched type
  */
@@ -1195,21 +1194,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 			break;
 		}
 
-		ret = check_timeout(evtims[i], adapter);
-		if (unlikely(ret == -1)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOLATE,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		} else if (unlikely(ret == -2)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOEARLY,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		}
-
 		if (unlikely(check_destination_event_queue(evtims[i],
 							   adapter) < 0)) {
 			__atomic_store_n(&evtims[i]->state,
@@ -1225,7 +1209,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
 		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
 
-		cycles = get_timeout_cycles(evtims[i], adapter);
+		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
+		if (unlikely(ret == -1)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOLATE,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		} else if (unlikely(ret == -2)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOEARLY,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		}
+
 		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
 					  type, lcore_id, NULL, evtims[i]);
 		if (ret < 0) {
-- 
2.23.0



* [PATCH v2] eventdev/timer: fix overflow issue
  2023-01-24 16:09 [PATCH] eventdev/timer: fix overflow issue Erik Gabriel Carrillo
@ 2023-01-24 20:45 ` Erik Gabriel Carrillo
  2023-02-07  6:00   ` Jerin Jacob
  2023-02-09 15:13   ` [PATCH v3] " Erik Gabriel Carrillo
  2023-01-25  2:47 ` [PATCH] " Stephen Hemminger
  1 sibling, 2 replies; 9+ messages in thread
From: Erik Gabriel Carrillo @ 2023-01-24 20:45 UTC (permalink / raw)
  To: jerinj; +Cc: dev, stable

The software timer adapter converts event timer timeout ticks to a
number of CPU cycles at which an rte_timer should expire. The
computation uses integer operations that can result in overflow.

Use floating point operations instead to perform the computation, and
convert the final result back to an integer type when returning. Also
move the logic that checks the timeout range into the function that
performs the above computation.

Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
Cc: stable@dpdk.org

Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
---
v2:
* Fix implicit int to float conversion build warning on Clang

 lib/eventdev/rte_event_timer_adapter.c | 71 ++++++++++++--------------
 1 file changed, 34 insertions(+), 37 deletions(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index d357f7403a..2efeb73bce 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -719,13 +719,28 @@ swtim_callback(struct rte_timer *tim)
 	}
 }
 
-static __rte_always_inline uint64_t
+static __rte_always_inline int
 get_timeout_cycles(struct rte_event_timer *evtim,
-		   const struct rte_event_timer_adapter *adapter)
+		   const struct rte_event_timer_adapter *adapter,
+		   uint64_t *timeout_cycles)
 {
 	struct swtim *sw = swtim_pmd_priv(adapter);
-	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
-	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
+	uint64_t timeout_nsecs;
+	double cycles_per_nsec;
+
+	cycles_per_nsec = (double)rte_get_timer_hz() / NSECPERSEC;
+
+	timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
+	if (timeout_nsecs > sw->max_tmo_ns)
+		return -1;
+	if (timeout_nsecs < sw->timer_tick_ns)
+		return -2;
+
+	if (timeout_cycles)
+		*timeout_cycles = (uint64_t)ceil(timeout_nsecs *
+						 cycles_per_nsec);
+
+	return 0;
 }
 
 /* This function returns true if one or more (adapter) ticks have occurred since
@@ -759,23 +774,6 @@ swtim_did_tick(struct swtim *sw)
 	return false;
 }
 
-/* Check that event timer timeout value is in range */
-static __rte_always_inline int
-check_timeout(struct rte_event_timer *evtim,
-	      const struct rte_event_timer_adapter *adapter)
-{
-	uint64_t tmo_nsec;
-	struct swtim *sw = swtim_pmd_priv(adapter);
-
-	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
-	if (tmo_nsec > sw->max_tmo_ns)
-		return -1;
-	if (tmo_nsec < sw->timer_tick_ns)
-		return -2;
-
-	return 0;
-}
-
 /* Check that event timer event queue sched type matches destination event queue
  * sched type
  */
@@ -1195,21 +1193,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 			break;
 		}
 
-		ret = check_timeout(evtims[i], adapter);
-		if (unlikely(ret == -1)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOLATE,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		} else if (unlikely(ret == -2)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOEARLY,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		}
-
 		if (unlikely(check_destination_event_queue(evtims[i],
 							   adapter) < 0)) {
 			__atomic_store_n(&evtims[i]->state,
@@ -1225,7 +1208,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
 		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
 
-		cycles = get_timeout_cycles(evtims[i], adapter);
+		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
+		if (unlikely(ret == -1)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOLATE,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		} else if (unlikely(ret == -2)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOEARLY,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		}
+
 		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
 					  type, lcore_id, NULL, evtims[i]);
 		if (ret < 0) {
-- 
2.23.0



* Re: [PATCH] eventdev/timer: fix overflow issue
  2023-01-24 16:09 [PATCH] eventdev/timer: fix overflow issue Erik Gabriel Carrillo
  2023-01-24 20:45 ` [PATCH v2] " Erik Gabriel Carrillo
@ 2023-01-25  2:47 ` Stephen Hemminger
  2023-01-25  5:07   ` Jerin Jacob
  1 sibling, 1 reply; 9+ messages in thread
From: Stephen Hemminger @ 2023-01-25  2:47 UTC (permalink / raw)
  To: Erik Gabriel Carrillo; +Cc: jerinj, dev, stable

On Tue, 24 Jan 2023 10:09:45 -0600
Erik Gabriel Carrillo <erik.g.carrillo@intel.com> wrote:

> The software timer adapter converts event timer timeout ticks to a
> number of CPU cycles at which an rte_timer should expire. The
> computation uses integer operations that can result in overflow.
> 
> Use floating point operations instead to perform the computation, and
> convert the final result back to an integer type when returning. Also
> move the logic that checks the timeout range into the function that
> performs the above computation.
> 
> Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
> Cc: stable@dpdk.org

Don't like this solution.
Floating point is slow and inaccurate.
You can do it with fixed point math if you are careful.
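
(A minimal sketch of the kind of fixed-point conversion meant here - this is
not part of any submitted patch, the Q32.32 scaling and helper names are
assumptions, and it relies on the unsigned __int128 extension available in
GCC/Clang:)

#include <stdint.h>
#include <rte_cycles.h>

#define NS_PER_SEC 1000000000ULL	/* local constant for the sketch */

static uint64_t cycles_per_ns_q32;	/* Q32.32 cycles-per-nanosecond */

/* Compute the factor once, e.g. at adapter creation time. */
static void
example_init_factor(void)
{
	cycles_per_ns_q32 = (uint64_t)(((unsigned __int128)rte_get_timer_hz()
					<< 32) / NS_PER_SEC);
}

/* Fast-path conversion: one widening multiply plus a shift. */
static uint64_t
example_ns_to_cycles(uint64_t timeout_ns)
{
	return (uint64_t)(((unsigned __int128)timeout_ns *
			   cycles_per_ns_q32) >> 32);
}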


* Re: [PATCH] eventdev/timer: fix overflow issue
  2023-01-25  2:47 ` [PATCH] " Stephen Hemminger
@ 2023-01-25  5:07   ` Jerin Jacob
  2023-01-25 14:53     ` Stephen Hemminger
  0 siblings, 1 reply; 9+ messages in thread
From: Jerin Jacob @ 2023-01-25  5:07 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: Erik Gabriel Carrillo, jerinj, dev, stable

On Wed, Jan 25, 2023 at 8:17 AM Stephen Hemminger
<stephen@networkplumber.org> wrote:
>
> On Tue, 24 Jan 2023 10:09:45 -0600
> Erik Gabriel Carrillo <erik.g.carrillo@intel.com> wrote:
>
> > The software timer adapter converts event timer timeout ticks to a
> > number of CPU cycles at which an rte_timer should expire. The
> > computation uses integer operations that can result in overflow.
> >
> > Use floating point operations instead to perform the computation, and
> > convert the final result back to an integer type when returning. Also
> > move the logic that checks the timeout range into the function that
> > performs the above computation.
> >
> > Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
> > Cc: stable@dpdk.org
>
> Don't like this solution.
> Floating point is slow and inaccurate.
> You can do it with fixed point math if you are careful.

Looks like Stephen replied to v1, hence the comment is not showing up here
https://patches.dpdk.org/project/dpdk/patch/20230124204555.3022361-1-erik.g.carrillo@intel.com/
@Erik Gabriel Carrillo Can the following be moved to the slow path?
+ cycles_per_nsec = (double)rte_get_timer_hz() / NSECPERSEC;


* Re: [PATCH] eventdev/timer: fix overflow issue
  2023-01-25  5:07   ` Jerin Jacob
@ 2023-01-25 14:53     ` Stephen Hemminger
  0 siblings, 0 replies; 9+ messages in thread
From: Stephen Hemminger @ 2023-01-25 14:53 UTC (permalink / raw)
  To: Jerin Jacob; +Cc: Erik Gabriel Carrillo, jerinj, dev, stable

On Wed, 25 Jan 2023 10:37:16 +0530
Jerin Jacob <jerinjacobk@gmail.com> wrote:

> Looks like Stephen replied to v1, hence the comment is not showing up here
> https://patches.dpdk.org/project/dpdk/patch/20230124204555.3022361-1-erik.g.carrillo@intel.com/
> @Erik Gabriel Carrillo Can the following be moved to the slow path?
> + cycles_per_nsec = (double)rte_get_timer_hz() / NSECPERSEC;

The best solution is to use rte_reciprocal.
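
(For readers unfamiliar with the API, a tiny usage sketch - the divisor and
function name are arbitrary illustration; the v3 patch further down in this
thread uses the same two calls:)

#include <stdint.h>
#include <rte_reciprocal.h>

/* Convert nanoseconds to whole seconds without a hardware divide. */
static uint64_t
example_ns_to_secs(uint64_t ns)
{
	/* In real code the inverse would be computed once and cached. */
	struct rte_reciprocal_u64 inv = rte_reciprocal_value_u64(1000000000ULL);

	return rte_reciprocal_divide_u64(ns, &inv);
}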


* Re: [PATCH v2] eventdev/timer: fix overflow issue
  2023-01-24 20:45 ` [PATCH v2] " Erik Gabriel Carrillo
@ 2023-02-07  6:00   ` Jerin Jacob
  2023-02-07 22:02     ` Carrillo, Erik G
  2023-02-09 15:13   ` [PATCH v3] " Erik Gabriel Carrillo
  1 sibling, 1 reply; 9+ messages in thread
From: Jerin Jacob @ 2023-02-07  6:00 UTC (permalink / raw)
  To: Erik Gabriel Carrillo, Stephen Hemminger; +Cc: jerinj, dev, stable

On Wed, Jan 25, 2023 at 2:16 AM Erik Gabriel Carrillo
<erik.g.carrillo@intel.com> wrote:
>
> The software timer adapter converts event timer timeout ticks to a
> number of CPU cycles at which an rte_timer should expire. The
> computation uses integer operations that can result in overflow.
>
> Use floating point operations instead to perform the computation, and
> convert the final result back to an integer type when returning. Also
> move the logic that checks the timeout range into the function that
> performs the above computation.
>
> Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
> Cc: stable@dpdk.org
>
> Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
> ---
> v2:
> * Fix implicit int to float conversion build warning on Clang

The comments from @Stephen Hemminger on using rte_reciprocal have not been
addressed yet. Also check what computation can be moved to the slow path.
Marking as "Changes requested"; looking forward to the next version.


>
>  lib/eventdev/rte_event_timer_adapter.c | 71 ++++++++++++--------------
>  1 file changed, 34 insertions(+), 37 deletions(-)
>
> diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
> index d357f7403a..2efeb73bce 100644
> --- a/lib/eventdev/rte_event_timer_adapter.c
> +++ b/lib/eventdev/rte_event_timer_adapter.c
> @@ -719,13 +719,28 @@ swtim_callback(struct rte_timer *tim)
>         }
>  }
>
> -static __rte_always_inline uint64_t
> +static __rte_always_inline int
>  get_timeout_cycles(struct rte_event_timer *evtim,
> -                  const struct rte_event_timer_adapter *adapter)
> +                  const struct rte_event_timer_adapter *adapter,
> +                  uint64_t *timeout_cycles)
>  {
>         struct swtim *sw = swtim_pmd_priv(adapter);
> -       uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
> -       return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
> +       uint64_t timeout_nsecs;
> +       double cycles_per_nsec;
> +
> +       cycles_per_nsec = (double)rte_get_timer_hz() / NSECPERSEC;
> +
> +       timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
> +       if (timeout_nsecs > sw->max_tmo_ns)
> +               return -1;
> +       if (timeout_nsecs < sw->timer_tick_ns)
> +               return -2;
> +
> +       if (timeout_cycles)
> +               *timeout_cycles = (uint64_t)ceil(timeout_nsecs *
> +                                                cycles_per_nsec);
> +
> +       return 0;
>  }
>
>  /* This function returns true if one or more (adapter) ticks have occurred since
> @@ -759,23 +774,6 @@ swtim_did_tick(struct swtim *sw)
>         return false;
>  }
>
> -/* Check that event timer timeout value is in range */
> -static __rte_always_inline int
> -check_timeout(struct rte_event_timer *evtim,
> -             const struct rte_event_timer_adapter *adapter)
> -{
> -       uint64_t tmo_nsec;
> -       struct swtim *sw = swtim_pmd_priv(adapter);
> -
> -       tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
> -       if (tmo_nsec > sw->max_tmo_ns)
> -               return -1;
> -       if (tmo_nsec < sw->timer_tick_ns)
> -               return -2;
> -
> -       return 0;
> -}
> -
>  /* Check that event timer event queue sched type matches destination event queue
>   * sched type
>   */
> @@ -1195,21 +1193,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
>                         break;
>                 }
>
> -               ret = check_timeout(evtims[i], adapter);
> -               if (unlikely(ret == -1)) {
> -                       __atomic_store_n(&evtims[i]->state,
> -                                       RTE_EVENT_TIMER_ERROR_TOOLATE,
> -                                       __ATOMIC_RELAXED);
> -                       rte_errno = EINVAL;
> -                       break;
> -               } else if (unlikely(ret == -2)) {
> -                       __atomic_store_n(&evtims[i]->state,
> -                                       RTE_EVENT_TIMER_ERROR_TOOEARLY,
> -                                       __ATOMIC_RELAXED);
> -                       rte_errno = EINVAL;
> -                       break;
> -               }
> -
>                 if (unlikely(check_destination_event_queue(evtims[i],
>                                                            adapter) < 0)) {
>                         __atomic_store_n(&evtims[i]->state,
> @@ -1225,7 +1208,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
>                 evtims[i]->impl_opaque[0] = (uintptr_t)tim;
>                 evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
>
> -               cycles = get_timeout_cycles(evtims[i], adapter);
> +               ret = get_timeout_cycles(evtims[i], adapter, &cycles);
> +               if (unlikely(ret == -1)) {
> +                       __atomic_store_n(&evtims[i]->state,
> +                                       RTE_EVENT_TIMER_ERROR_TOOLATE,
> +                                       __ATOMIC_RELAXED);
> +                       rte_errno = EINVAL;
> +                       break;
> +               } else if (unlikely(ret == -2)) {
> +                       __atomic_store_n(&evtims[i]->state,
> +                                       RTE_EVENT_TIMER_ERROR_TOOEARLY,
> +                                       __ATOMIC_RELAXED);
> +                       rte_errno = EINVAL;
> +                       break;
> +               }
> +
>                 ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
>                                           type, lcore_id, NULL, evtims[i]);
>                 if (ret < 0) {
> --
> 2.23.0
>


* RE: [PATCH v2] eventdev/timer: fix overflow issue
  2023-02-07  6:00   ` Jerin Jacob
@ 2023-02-07 22:02     ` Carrillo, Erik G
  0 siblings, 0 replies; 9+ messages in thread
From: Carrillo, Erik G @ 2023-02-07 22:02 UTC (permalink / raw)
  To: Jerin Jacob, Stephen Hemminger; +Cc: jerinj, dev, stable



> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Tuesday, February 7, 2023 12:01 AM
> To: Carrillo, Erik G <erik.g.carrillo@intel.com>; Stephen Hemminger
> <stephen@networkplumber.org>
> Cc: jerinj@marvell.com; dev@dpdk.org; stable@dpdk.org
> Subject: Re: [PATCH v2] eventdev/timer: fix overflow issue
> 
> On Wed, Jan 25, 2023 at 2:16 AM Erik Gabriel Carrillo
> <erik.g.carrillo@intel.com> wrote:
> >
> > The software timer adapter converts event timer timeout ticks to a
> > number of CPU cycles at which an rte_timer should expire. The
> > computation uses integer operations that can result in overflow.
> >
> > Use floating point operations instead to perform the computation, and
> > convert the final result back to an integer type when returning. Also
> > move the logic that checks the timeout range into the function that
> > performs the above computation.
> >
> > Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
> > ---
> > v2:
> > * Fix implicit int to float conversion build warning on Clang
> 
> The comments from @Stephen Hemminger on using rte_reciprocal have not been
> addressed yet. Also check what computation can be moved to the slow path.
> Marking as "Changes requested"; looking forward to the next version.
> 
> 

Ok - thanks for the pointer to rte_reciprocal. I'll rework and submit a new version.

Regards,
Erik


* [PATCH v3] eventdev/timer: fix overflow issue
  2023-01-24 20:45 ` [PATCH v2] " Erik Gabriel Carrillo
  2023-02-07  6:00   ` Jerin Jacob
@ 2023-02-09 15:13   ` Erik Gabriel Carrillo
  2023-02-10  6:47     ` Jerin Jacob
  1 sibling, 1 reply; 9+ messages in thread
From: Erik Gabriel Carrillo @ 2023-02-09 15:13 UTC (permalink / raw)
  To: jerinj; +Cc: stephen, dev, stable

The software timer adapter converts event timer timeout ticks to a
number of TSC cycles at which an rte_timer should expire. The
computation uses an integer multiplication that can result in overflow.

If necessary, reduce the timeout_nsecs value by the number of whole
seconds it contains to keep the value of the multiplier within a
range that will not result in overflow.  Add the saved value back later
to get the final result. Also, move the logic that checks the timeout
range into the function that performs the above computation.
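
As a rough sanity check (illustrative numbers, not taken from the patch):
after the reduction the residual timeout_nsecs is below NSECPERSEC (1e9 ns),
so the intermediate product

    timeout_nsecs * timer_hz  <  1e9 * timer_hz

stays within uint64_t for any timer frequency up to about 18.4 GHz
(UINT64_MAX / 1e9 ~= 1.8e10), while the whole-second portion is converted
separately as secs * timer_hz and added back at the end.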

Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
Cc: stable@dpdk.org

Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
---
v3:
* Use integer operations instead of floating point, and use
  rte_reciprocal_divide() for division.

v2:
* Fix implicit int to float conversion build warning on Clang

 lib/eventdev/rte_event_timer_adapter.c | 97 ++++++++++++++++----------
 1 file changed, 59 insertions(+), 38 deletions(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index 7f4f347369..23eb1d4a7d 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -18,6 +18,7 @@
 #include <rte_timer.h>
 #include <rte_service_component.h>
 #include <rte_telemetry.h>
+#include <rte_reciprocal.h>
 
 #include "event_timer_adapter_pmd.h"
 #include "eventdev_pmd.h"
@@ -734,13 +735,51 @@ swtim_callback(struct rte_timer *tim)
 	}
 }
 
-static __rte_always_inline uint64_t
+static __rte_always_inline int
 get_timeout_cycles(struct rte_event_timer *evtim,
-		   const struct rte_event_timer_adapter *adapter)
+		   const struct rte_event_timer_adapter *adapter,
+		   uint64_t *timeout_cycles)
 {
-	struct swtim *sw = swtim_pmd_priv(adapter);
-	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
-	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
+	static struct rte_reciprocal_u64 nsecpersec_inverse;
+	static uint64_t timer_hz;
+	uint64_t rem_cycles, secs_cycles = 0;
+	uint64_t secs, timeout_nsecs;
+	uint64_t nsecpersec;
+	struct swtim *sw;
+
+	sw = swtim_pmd_priv(adapter);
+	nsecpersec = (uint64_t)NSECPERSEC;
+
+	timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
+	if (timeout_nsecs > sw->max_tmo_ns)
+		return -1;
+	if (timeout_nsecs < sw->timer_tick_ns)
+		return -2;
+
+	/* Set these values in the first invocation */
+	if (!timer_hz) {
+		timer_hz = rte_get_timer_hz();
+		nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
+	}
+
+	/* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
+	 * of whole seconds it contains and convert that value to a number
+	 * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
+	 * in order to avoid overflow when we later multiply by timer_hz.
+	 */
+	if (timeout_nsecs > nsecpersec) {
+		secs = rte_reciprocal_divide_u64(timeout_nsecs,
+						 &nsecpersec_inverse);
+		secs_cycles = secs * timer_hz;
+		timeout_nsecs -= secs * nsecpersec;
+	}
+
+	rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
+					       &nsecpersec_inverse);
+
+	*timeout_cycles = secs_cycles + rem_cycles;
+
+	return 0;
 }
 
 /* This function returns true if one or more (adapter) ticks have occurred since
@@ -774,23 +813,6 @@ swtim_did_tick(struct swtim *sw)
 	return false;
 }
 
-/* Check that event timer timeout value is in range */
-static __rte_always_inline int
-check_timeout(struct rte_event_timer *evtim,
-	      const struct rte_event_timer_adapter *adapter)
-{
-	uint64_t tmo_nsec;
-	struct swtim *sw = swtim_pmd_priv(adapter);
-
-	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
-	if (tmo_nsec > sw->max_tmo_ns)
-		return -1;
-	if (tmo_nsec < sw->timer_tick_ns)
-		return -2;
-
-	return 0;
-}
-
 /* Check that event timer event queue sched type matches destination event queue
  * sched type
  */
@@ -1210,21 +1232,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 			break;
 		}
 
-		ret = check_timeout(evtims[i], adapter);
-		if (unlikely(ret == -1)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOLATE,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		} else if (unlikely(ret == -2)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOEARLY,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		}
-
 		if (unlikely(check_destination_event_queue(evtims[i],
 							   adapter) < 0)) {
 			__atomic_store_n(&evtims[i]->state,
@@ -1240,7 +1247,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
 		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
 
-		cycles = get_timeout_cycles(evtims[i], adapter);
+		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
+		if (unlikely(ret == -1)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOLATE,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		} else if (unlikely(ret == -2)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOEARLY,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		}
+
 		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
 					  type, lcore_id, NULL, evtims[i]);
 		if (ret < 0) {
-- 
2.23.0



* Re: [PATCH v3] eventdev/timer: fix overflow issue
  2023-02-09 15:13   ` [PATCH v3] " Erik Gabriel Carrillo
@ 2023-02-10  6:47     ` Jerin Jacob
  0 siblings, 0 replies; 9+ messages in thread
From: Jerin Jacob @ 2023-02-10  6:47 UTC (permalink / raw)
  To: Erik Gabriel Carrillo; +Cc: jerinj, stephen, dev, stable

On Thu, Feb 9, 2023 at 8:44 PM Erik Gabriel Carrillo
<erik.g.carrillo@intel.com> wrote:
>
> The software timer adapter converts event timer timeout ticks to a
> number of TSC cycles at which an rte_timer should expire. The
> computation uses an integer multiplication that can result in overflow.
>
> If necessary, reduce the timeout_nsecs value by the number of whole
> seconds it contains to keep the value of the multiplier within a
> range that will not result in overflow.  Add the saved value back later
> to get the final result. Also, move the logic that checks the timeout
> range into the function that performs the above computation.
>
> Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
> Cc: stable@dpdk.org
>
> Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>

Applied to dpdk-next-net-eventdev/for-main. Thanks


> ---
> v3:
> * Use integer operations instead of floating point, and use
>   rte_reciprocal_divide() for division.
>
> v2:
> * Fix implicit int to float conversion build warning on Clang
>
>  lib/eventdev/rte_event_timer_adapter.c | 97 ++++++++++++++++----------
>  1 file changed, 59 insertions(+), 38 deletions(-)
>
> diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
> index 7f4f347369..23eb1d4a7d 100644
> --- a/lib/eventdev/rte_event_timer_adapter.c
> +++ b/lib/eventdev/rte_event_timer_adapter.c
> @@ -18,6 +18,7 @@
>  #include <rte_timer.h>
>  #include <rte_service_component.h>
>  #include <rte_telemetry.h>
> +#include <rte_reciprocal.h>
>
>  #include "event_timer_adapter_pmd.h"
>  #include "eventdev_pmd.h"
> @@ -734,13 +735,51 @@ swtim_callback(struct rte_timer *tim)
>         }
>  }
>
> -static __rte_always_inline uint64_t
> +static __rte_always_inline int
>  get_timeout_cycles(struct rte_event_timer *evtim,
> -                  const struct rte_event_timer_adapter *adapter)
> +                  const struct rte_event_timer_adapter *adapter,
> +                  uint64_t *timeout_cycles)
>  {
> -       struct swtim *sw = swtim_pmd_priv(adapter);
> -       uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
> -       return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
> +       static struct rte_reciprocal_u64 nsecpersec_inverse;
> +       static uint64_t timer_hz;
> +       uint64_t rem_cycles, secs_cycles = 0;
> +       uint64_t secs, timeout_nsecs;
> +       uint64_t nsecpersec;
> +       struct swtim *sw;
> +
> +       sw = swtim_pmd_priv(adapter);
> +       nsecpersec = (uint64_t)NSECPERSEC;
> +
> +       timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
> +       if (timeout_nsecs > sw->max_tmo_ns)
> +               return -1;
> +       if (timeout_nsecs < sw->timer_tick_ns)
> +               return -2;
> +
> +       /* Set these values in the first invocation */
> +       if (!timer_hz) {
> +               timer_hz = rte_get_timer_hz();
> +               nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
> +       }
> +
> +       /* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
> +        * of whole seconds it contains and convert that value to a number
> +        * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
> +        * in order to avoid overflow when we later multiply by timer_hz.
> +        */
> +       if (timeout_nsecs > nsecpersec) {
> +               secs = rte_reciprocal_divide_u64(timeout_nsecs,
> +                                                &nsecpersec_inverse);
> +               secs_cycles = secs * timer_hz;
> +               timeout_nsecs -= secs * nsecpersec;
> +       }
> +
> +       rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
> +                                              &nsecpersec_inverse);
> +
> +       *timeout_cycles = secs_cycles + rem_cycles;
> +
> +       return 0;
>  }
>
>  /* This function returns true if one or more (adapter) ticks have occurred since
> @@ -774,23 +813,6 @@ swtim_did_tick(struct swtim *sw)
>         return false;
>  }
>
> -/* Check that event timer timeout value is in range */
> -static __rte_always_inline int
> -check_timeout(struct rte_event_timer *evtim,
> -             const struct rte_event_timer_adapter *adapter)
> -{
> -       uint64_t tmo_nsec;
> -       struct swtim *sw = swtim_pmd_priv(adapter);
> -
> -       tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
> -       if (tmo_nsec > sw->max_tmo_ns)
> -               return -1;
> -       if (tmo_nsec < sw->timer_tick_ns)
> -               return -2;
> -
> -       return 0;
> -}
> -
>  /* Check that event timer event queue sched type matches destination event queue
>   * sched type
>   */
> @@ -1210,21 +1232,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
>                         break;
>                 }
>
> -               ret = check_timeout(evtims[i], adapter);
> -               if (unlikely(ret == -1)) {
> -                       __atomic_store_n(&evtims[i]->state,
> -                                       RTE_EVENT_TIMER_ERROR_TOOLATE,
> -                                       __ATOMIC_RELAXED);
> -                       rte_errno = EINVAL;
> -                       break;
> -               } else if (unlikely(ret == -2)) {
> -                       __atomic_store_n(&evtims[i]->state,
> -                                       RTE_EVENT_TIMER_ERROR_TOOEARLY,
> -                                       __ATOMIC_RELAXED);
> -                       rte_errno = EINVAL;
> -                       break;
> -               }
> -
>                 if (unlikely(check_destination_event_queue(evtims[i],
>                                                            adapter) < 0)) {
>                         __atomic_store_n(&evtims[i]->state,
> @@ -1240,7 +1247,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
>                 evtims[i]->impl_opaque[0] = (uintptr_t)tim;
>                 evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
>
> -               cycles = get_timeout_cycles(evtims[i], adapter);
> +               ret = get_timeout_cycles(evtims[i], adapter, &cycles);
> +               if (unlikely(ret == -1)) {
> +                       __atomic_store_n(&evtims[i]->state,
> +                                       RTE_EVENT_TIMER_ERROR_TOOLATE,
> +                                       __ATOMIC_RELAXED);
> +                       rte_errno = EINVAL;
> +                       break;
> +               } else if (unlikely(ret == -2)) {
> +                       __atomic_store_n(&evtims[i]->state,
> +                                       RTE_EVENT_TIMER_ERROR_TOOEARLY,
> +                                       __ATOMIC_RELAXED);
> +                       rte_errno = EINVAL;
> +                       break;
> +               }
> +
>                 ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
>                                           type, lcore_id, NULL, evtims[i]);
>                 if (ret < 0) {
> --
> 2.23.0
>

