From: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
To: jerinj@marvell.com
Cc: stephen@networkplumber.org, dev@dpdk.org, stable@dpdk.org
Subject: [PATCH v3] eventdev/timer: fix overflow issue
Date: Thu, 9 Feb 2023 09:13:49 -0600
Message-Id: <20230209151349.474358-1-erik.g.carrillo@intel.com>
In-Reply-To: <20230124204555.3022361-1-erik.g.carrillo@intel.com>
References: <20230124204555.3022361-1-erik.g.carrillo@intel.com>

The software timer adapter converts an event timer's timeout ticks to a
number of TSC cycles at which an rte_timer should expire. The
computation uses an integer multiplication that can result in overflow.

If necessary, reduce the timeout_nsecs value by the number of whole
seconds it contains so that the value multiplied by the timer frequency
stays in a range that cannot overflow, convert those seconds to cycles
separately, and add the saved value back afterwards to get the final
result.

Also, move the logic that checks the timeout range into the function
that performs the above computation.

Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
Cc: stable@dpdk.org

Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
---
v3:
* Use integer operations instead of floating point, and use
  rte_reciprocal_divide() for division.
v2:
* Fix implicit int to float conversion build warning on Clang
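For illustration, a minimal standalone sketch (not part of the patch;
the 2.5 GHz TSC rate and all names here are assumed for the example) of
how the old single multiplication wraps for timeouts beyond roughly 7
seconds, and how peeling off whole seconds first keeps the product in
range:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSECPERSEC 1000000000ULL

int main(void)
{
	uint64_t timer_hz = 2500000000ULL;        /* assumed 2.5 GHz TSC */
	uint64_t timeout_nsecs = 10 * NSECPERSEC; /* 10 second timeout */

	/* Old computation: 1e10 * 2.5e9 = 2.5e19 exceeds UINT64_MAX
	 * (~1.84e19), so the product silently wraps before the division.
	 */
	uint64_t wrapped = timeout_nsecs * timer_hz / NSECPERSEC;

	/* Patched approach: convert whole seconds separately so that the
	 * remainder multiplied by timer_hz stays below UINT64_MAX.
	 */
	uint64_t secs = timeout_nsecs / NSECPERSEC;
	uint64_t rem_ns = timeout_nsecs - secs * NSECPERSEC;
	uint64_t cycles = secs * timer_hz + rem_ns * timer_hz / NSECPERSEC;

	printf("wrapped=%" PRIu64 " correct=%" PRIu64 "\n", wrapped, cycles);
	return 0;
}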
 lib/eventdev/rte_event_timer_adapter.c | 97 ++++++++++++++++----------
 1 file changed, 59 insertions(+), 38 deletions(-)

diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index 7f4f347369..23eb1d4a7d 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <rte_reciprocal.h>
 
 #include "event_timer_adapter_pmd.h"
 #include "eventdev_pmd.h"
@@ -734,13 +735,51 @@ swtim_callback(struct rte_timer *tim)
 	}
 }
 
-static __rte_always_inline uint64_t
+static __rte_always_inline int
 get_timeout_cycles(struct rte_event_timer *evtim,
-		const struct rte_event_timer_adapter *adapter)
+		const struct rte_event_timer_adapter *adapter,
+		uint64_t *timeout_cycles)
 {
-	struct swtim *sw = swtim_pmd_priv(adapter);
-	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
-	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
+	static struct rte_reciprocal_u64 nsecpersec_inverse;
+	static uint64_t timer_hz;
+	uint64_t rem_cycles, secs_cycles = 0;
+	uint64_t secs, timeout_nsecs;
+	uint64_t nsecpersec;
+	struct swtim *sw;
+
+	sw = swtim_pmd_priv(adapter);
+	nsecpersec = (uint64_t)NSECPERSEC;
+
+	timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
+	if (timeout_nsecs > sw->max_tmo_ns)
+		return -1;
+	if (timeout_nsecs < sw->timer_tick_ns)
+		return -2;
+
+	/* Set these values in the first invocation */
+	if (!timer_hz) {
+		timer_hz = rte_get_timer_hz();
+		nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec);
+	}
+
+	/* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number
+	 * of whole seconds it contains and convert that value to a number
+	 * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec)
+	 * in order to avoid overflow when we later multiply by timer_hz.
+	 */
+	if (timeout_nsecs > nsecpersec) {
+		secs = rte_reciprocal_divide_u64(timeout_nsecs,
+						&nsecpersec_inverse);
+		secs_cycles = secs * timer_hz;
+		timeout_nsecs -= secs * nsecpersec;
+	}
+
+	rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
+					&nsecpersec_inverse);
+
+	*timeout_cycles = secs_cycles + rem_cycles;
+
+	return 0;
 }
 
 /* This function returns true if one or more (adapter) ticks have occurred since
@@ -774,23 +813,6 @@ swtim_did_tick(struct swtim *sw)
 	return false;
 }
 
-/* Check that event timer timeout value is in range */
-static __rte_always_inline int
-check_timeout(struct rte_event_timer *evtim,
-		const struct rte_event_timer_adapter *adapter)
-{
-	uint64_t tmo_nsec;
-	struct swtim *sw = swtim_pmd_priv(adapter);
-
-	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
-	if (tmo_nsec > sw->max_tmo_ns)
-		return -1;
-	if (tmo_nsec < sw->timer_tick_ns)
-		return -2;
-
-	return 0;
-}
-
 /* Check that event timer event queue sched type matches destination event queue
  * sched type
  */
@@ -1210,21 +1232,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 			break;
 		}
 
-		ret = check_timeout(evtims[i], adapter);
-		if (unlikely(ret == -1)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOLATE,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		} else if (unlikely(ret == -2)) {
-			__atomic_store_n(&evtims[i]->state,
-					RTE_EVENT_TIMER_ERROR_TOOEARLY,
-					__ATOMIC_RELAXED);
-			rte_errno = EINVAL;
-			break;
-		}
-
 		if (unlikely(check_destination_event_queue(evtims[i],
 							adapter) < 0)) {
 			__atomic_store_n(&evtims[i]->state,
@@ -1240,7 +1247,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
 		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
 
-		cycles = get_timeout_cycles(evtims[i], adapter);
+		ret = get_timeout_cycles(evtims[i], adapter, &cycles);
+		if (unlikely(ret == -1)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOLATE,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		} else if (unlikely(ret == -2)) {
+			__atomic_store_n(&evtims[i]->state,
+					RTE_EVENT_TIMER_ERROR_TOOEARLY,
+					__ATOMIC_RELAXED);
+			rte_errno = EINVAL;
+			break;
+		}
+
 		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
 					type, lcore_id, NULL, evtims[i]);
 		if (ret < 0) {
-- 
2.23.0
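As background on the rte_reciprocal calls above: the reciprocal of the
divisor is computed once up front, after which each division becomes
multiply/shift work rather than a hardware divide, which is why the
patch caches nsecpersec_inverse in a static variable. A minimal sketch
of the pattern (standalone and illustrative, not taken from the patch;
the names are invented and it links against DPDK's EAL):

#include <stdint.h>

#include <rte_reciprocal.h>

#define NSECPERSEC 1000000000ULL

/* Reciprocal of the divisor, computed once. */
static struct rte_reciprocal_u64 nsec_inverse;

/* Equivalent to ns / NSECPERSEC, but the hot path avoids a
 * hardware divide by using the precomputed reciprocal.
 */
static uint64_t
ns_to_secs(uint64_t ns)
{
	return rte_reciprocal_divide_u64(ns, &nsec_inverse);
}

int main(void)
{
	nsec_inverse = rte_reciprocal_value_u64(NSECPERSEC);

	/* 2500000000 ns contains 2 whole seconds */
	return ns_to_secs(2500000000ULL) == 2 ? 0 : 1;
}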