Coverity complains about unchecked return value of rte_rcu_qsbr_dq_enqueue. By default, defer queue size is big enough to hold all tbl8 groups. When enqueue fails, return error to the user to indicate system issue. Coverity issue: 360832 Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation") Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com> --- lib/librte_lpm/rte_lpm.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index 2db9e16a2..a6d3a7894 100644 --- a/lib/librte_lpm/rte_lpm.c +++ b/lib/librte_lpm/rte_lpm.c @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm) return group_idx; } -static void +static int tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) { struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; struct __rte_lpm *internal_lpm; + int rc = 0; internal_lpm = container_of(lpm, struct __rte_lpm, lpm); if (internal_lpm->v == NULL) { @@ -552,9 +553,13 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) __ATOMIC_RELAXED); } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) { /* Push into QSBR defer queue. */ - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, + rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, (void *)&tbl8_group_start); + if (rc != 0) + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n"); } + + return rc; } static __rte_noinline int32_t @@ -1041,6 +1046,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, tbl8_range, i; int32_t tbl8_recycle_index; + int rc = 0; /* * Calculate the index into tbl24 and range. Note: All depths larger @@ -1097,7 +1103,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, */ lpm->tbl24[tbl24_index].valid = 0; __atomic_thread_fence(__ATOMIC_RELEASE); - tbl8_free(lpm, tbl8_group_start); + rc = tbl8_free(lpm, tbl8_group_start); } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. 
*/ struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1113,10 +1119,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, __ATOMIC_RELAXED); __atomic_thread_fence(__ATOMIC_RELEASE); - tbl8_free(lpm, tbl8_group_start); + rc = tbl8_free(lpm, tbl8_group_start); } #undef group_idx - return 0; + return (int32_t)rc; } /* -- 2.17.1
Hi Ruifeng, On 16/07/2020 06:19, Ruifeng Wang wrote: > Coverity complains about unchecked return value of rte_rcu_qsbr_dq_enqueue. > By default, defer queue size is big enough to hold all tbl8 groups. When > enqueue fails, return error to the user to indicate system issue. > > Coverity issue: 360832 > Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation") > > Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com> > --- > lib/librte_lpm/rte_lpm.c | 16 +++++++++++----- > 1 file changed, 11 insertions(+), 5 deletions(-) > > diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c > index 2db9e16a2..a6d3a7894 100644 > --- a/lib/librte_lpm/rte_lpm.c > +++ b/lib/librte_lpm/rte_lpm.c > @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm) > return group_idx; > } > > -static void > +static int > tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) > { > struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; > struct __rte_lpm *internal_lpm; > + int rc = 0; > > internal_lpm = container_of(lpm, struct __rte_lpm, lpm); > if (internal_lpm->v == NULL) { > @@ -552,9 +553,13 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) > __ATOMIC_RELAXED); > } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) { > /* Push into QSBR defer queue. */ > - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, > + rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, > (void *)&tbl8_group_start); On failure rte_rcu_qsbr_dq_enqueue() returns 1 and sets rte_errno. Consequently, rc value is propagated to delete_depth_big() -> rte_lpm_delete(), and on failure the latter returns "1" value, which conflicts with the LPM API: "0 on success, negative value otherwise" I would suggest here to return -rte_errno if rc is equal to 1. 
> + if (rc != 0) > + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n"); > } > + > + return rc; > } > > static __rte_noinline int32_t > @@ -1041,6 +1046,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, > uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, > tbl8_range, i; > int32_t tbl8_recycle_index; > + int rc = 0; > > /* > * Calculate the index into tbl24 and range. Note: All depths larger > @@ -1097,7 +1103,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, > */ > lpm->tbl24[tbl24_index].valid = 0; > __atomic_thread_fence(__ATOMIC_RELEASE); > - tbl8_free(lpm, tbl8_group_start); > + rc = tbl8_free(lpm, tbl8_group_start); > } else if (tbl8_recycle_index > -1) { > /* Update tbl24 entry. */ > struct rte_lpm_tbl_entry new_tbl24_entry = { > @@ -1113,10 +1119,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, > __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, > __ATOMIC_RELAXED); > __atomic_thread_fence(__ATOMIC_RELEASE); > - tbl8_free(lpm, tbl8_group_start); > + rc = tbl8_free(lpm, tbl8_group_start); > } > #undef group_idx > - return 0; > + return (int32_t)rc; > } > > /* > -- Regards, Vladimir
> -----Original Message----- > From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com> > Sent: Thursday, July 16, 2020 7:00 PM > To: Ruifeng Wang <Ruifeng.Wang@arm.com>; Bruce Richardson > <bruce.richardson@intel.com> > Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli > <Honnappa.Nagarahalli@arm.com>; Phil Yang <Phil.Yang@arm.com> > Subject: Re: [PATCH] lpm: fix unchecked return value > > Hi Ruifeng, > > On 16/07/2020 06:19, Ruifeng Wang wrote: > > Coverity complains about unchecked return value of > rte_rcu_qsbr_dq_enqueue. > > By default, defer queue size is big enough to hold all tbl8 groups. > > When enqueue fails, return error to the user to indicate system issue. > > > > Coverity issue: 360832 > > Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation") > > > > Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com> > > --- > > lib/librte_lpm/rte_lpm.c | 16 +++++++++++----- > > 1 file changed, 11 insertions(+), 5 deletions(-) > > > > diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index > > 2db9e16a2..a6d3a7894 100644 > > --- a/lib/librte_lpm/rte_lpm.c > > +++ b/lib/librte_lpm/rte_lpm.c > > @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm) > > return group_idx; > > } > > > > -static void > > +static int > > tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) > > { > > struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; > > struct __rte_lpm *internal_lpm; > > + int rc = 0; > > > > internal_lpm = container_of(lpm, struct __rte_lpm, lpm); > > if (internal_lpm->v == NULL) { > > @@ -552,9 +553,13 @@ tbl8_free(struct rte_lpm *lpm, uint32_t > tbl8_group_start) > > __ATOMIC_RELAXED); > > } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) { > > /* Push into QSBR defer queue. */ > > - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, > > + rc = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, > > (void *)&tbl8_group_start); > > On failure rte_rcu_qsbr_dq_enqueue() returns 1 and sets rte_errno. 
> Consequently, rc value is propagated to delete_depth_big() -> > rte_lpm_delete(), and on failure the latter returns "1" value, which conflicts > with the LPM API: > "0 on success, negative value otherwise" > I would suggest here to return -rte_errno if rc is equal to 1. > Yes, the return value is a little different from LPM APIs. Will change it in next version to keep consistent with other LPM APIs. Thanks. /Ruifeng > > > + if (rc != 0) > > + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n"); > > } > > + > > + return rc; > > } > > > > static __rte_noinline int32_t > > @@ -1041,6 +1046,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t > ip_masked, > > uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, > tbl8_index, > > tbl8_range, i; > > int32_t tbl8_recycle_index; > > + int rc = 0; > > > > /* > > * Calculate the index into tbl24 and range. Note: All depths > > larger @@ -1097,7 +1103,7 @@ delete_depth_big(struct rte_lpm *lpm, > uint32_t ip_masked, > > */ > > lpm->tbl24[tbl24_index].valid = 0; > > __atomic_thread_fence(__ATOMIC_RELEASE); > > - tbl8_free(lpm, tbl8_group_start); > > + rc = tbl8_free(lpm, tbl8_group_start); > > } else if (tbl8_recycle_index > -1) { > > /* Update tbl24 entry. */ > > struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1113,10 > +1119,10 > > @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, > > __atomic_store(&lpm->tbl24[tbl24_index], > &new_tbl24_entry, > > __ATOMIC_RELAXED); > > __atomic_thread_fence(__ATOMIC_RELEASE); > > - tbl8_free(lpm, tbl8_group_start); > > + rc = tbl8_free(lpm, tbl8_group_start); > > } > > #undef group_idx > > - return 0; > > + return (int32_t)rc; > > } > > > > /* > > > > -- > Regards, > Vladimir
Coverity complains about unchecked return value of rte_rcu_qsbr_dq_enqueue. By default, defer queue size is big enough to hold all tbl8 groups. When enqueue fails, return error to the user to indicate system issue. Coverity issue: 360832 Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation") Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com> --- v2: Converted return value to conform to LPM API convention. (Vladimir) lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index 2db9e16a2..757436f49 100644 --- a/lib/librte_lpm/rte_lpm.c +++ b/lib/librte_lpm/rte_lpm.c @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm) return group_idx; } -static void +static int32_t tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) { struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; struct __rte_lpm *internal_lpm; + int status; internal_lpm = container_of(lpm, struct __rte_lpm, lpm); if (internal_lpm->v == NULL) { @@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) __ATOMIC_RELAXED); } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) { /* Push into QSBR defer queue. */ - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, + status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, (void *)&tbl8_group_start); + if (status == 1) { + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n"); + return -rte_errno; + } } + + return 0; } static __rte_noinline int32_t @@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, #define group_idx next_hop uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, tbl8_range, i; - int32_t tbl8_recycle_index; + int32_t tbl8_recycle_index, status = 0; /* * Calculate the index into tbl24 and range. 
Note: All depths larger @@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, */ lpm->tbl24[tbl24_index].valid = 0; __atomic_thread_fence(__ATOMIC_RELEASE); - tbl8_free(lpm, tbl8_group_start); + status = tbl8_free(lpm, tbl8_group_start); } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. */ struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1113,10 +1120,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, __ATOMIC_RELAXED); __atomic_thread_fence(__ATOMIC_RELEASE); - tbl8_free(lpm, tbl8_group_start); + status = tbl8_free(lpm, tbl8_group_start); } #undef group_idx - return 0; + return status; } /* -- 2.17.1
Hi Ruifeng, On 16/07/2020 16:49, Ruifeng Wang wrote: > Coverity complains about unchecked return value of rte_rcu_qsbr_dq_enqueue. > By default, defer queue size is big enough to hold all tbl8 groups. When > enqueue fails, return error to the user to indicate system issue. > > Coverity issue: 360832 > Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation") > > Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com> > --- > v2: > Converted return value to conform to LPM API convention. (Vladimir) > > lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------ > 1 file changed, 13 insertions(+), 6 deletions(-) > > diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c > index 2db9e16a2..757436f49 100644 > --- a/lib/librte_lpm/rte_lpm.c > +++ b/lib/librte_lpm/rte_lpm.c > @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm) > return group_idx; > } > > -static void > +static int32_t > tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) > { > struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; > struct __rte_lpm *internal_lpm; > + int status; > > internal_lpm = container_of(lpm, struct __rte_lpm, lpm); > if (internal_lpm->v == NULL) { > @@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) > __ATOMIC_RELAXED); > } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) { > /* Push into QSBR defer queue. */ > - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, > + status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, > (void *)&tbl8_group_start); > + if (status == 1) { > + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n"); > + return -rte_errno; > + } > } > + > + return 0; > } > > static __rte_noinline int32_t > @@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, > #define group_idx next_hop > uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, > tbl8_range, i; > - int32_t tbl8_recycle_index; > + int32_t tbl8_recycle_index, status = 0; > > /* > * Calculate the index into tbl24 and range. 
Note: All depths larger > @@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, > */ > lpm->tbl24[tbl24_index].valid = 0; > __atomic_thread_fence(__ATOMIC_RELEASE); > - tbl8_free(lpm, tbl8_group_start); > + status = tbl8_free(lpm, tbl8_group_start); > } else if (tbl8_recycle_index > -1) { > /* Update tbl24 entry. */ > struct rte_lpm_tbl_entry new_tbl24_entry = { > @@ -1113,10 +1120,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, > __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, > __ATOMIC_RELAXED); > __atomic_thread_fence(__ATOMIC_RELEASE); > - tbl8_free(lpm, tbl8_group_start); > + status = tbl8_free(lpm, tbl8_group_start); > } > #undef group_idx > - return 0; > + return status; This will change rte_lpm_delete API. As a suggestion, you can leave it as it was before ("return 0"), and send separate patch (with "return status)" which will be targeted to 20.11. > } > > /* > -- Regards, Vladimir
> -----Original Message----- > From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com> > Sent: Saturday, July 18, 2020 1:12 AM > To: Ruifeng Wang <Ruifeng.Wang@arm.com>; Bruce Richardson > <bruce.richardson@intel.com> > Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli > <Honnappa.Nagarahalli@arm.com>; Phil Yang <Phil.Yang@arm.com> > Subject: Re: [PATCH v2] lpm: fix unchecked return value > > Hi Ruifeng, > Hi Vladimir, > On 16/07/2020 16:49, Ruifeng Wang wrote: > > Coverity complains about unchecked return value of > rte_rcu_qsbr_dq_enqueue. > > By default, defer queue size is big enough to hold all tbl8 groups. > > When enqueue fails, return error to the user to indicate system issue. > > > > Coverity issue: 360832 > > Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation") > > > > Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com> > > --- > > v2: > > Converted return value to conform to LPM API convention. (Vladimir) > > > > lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------ > > 1 file changed, 13 insertions(+), 6 deletions(-) > > > > diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index > > 2db9e16a2..757436f49 100644 > > --- a/lib/librte_lpm/rte_lpm.c > > +++ b/lib/librte_lpm/rte_lpm.c > > @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm) > > return group_idx; > > } > > > > -static void > > +static int32_t > > tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) > > { > > struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; > > struct __rte_lpm *internal_lpm; > > + int status; > > > > internal_lpm = container_of(lpm, struct __rte_lpm, lpm); > > if (internal_lpm->v == NULL) { > > @@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t > tbl8_group_start) > > __ATOMIC_RELAXED); > > } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) { > > /* Push into QSBR defer queue. 
*/ > > - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, > > + status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, > > (void *)&tbl8_group_start); > > + if (status == 1) { > > + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n"); > > + return -rte_errno; > > + } > > } > > + > > + return 0; > > } > > > > static __rte_noinline int32_t > > @@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t > ip_masked, > > #define group_idx next_hop > > uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, > tbl8_index, > > tbl8_range, i; > > - int32_t tbl8_recycle_index; > > + int32_t tbl8_recycle_index, status = 0; > > > > /* > > * Calculate the index into tbl24 and range. Note: All depths > > larger @@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, > uint32_t ip_masked, > > */ > > lpm->tbl24[tbl24_index].valid = 0; > > __atomic_thread_fence(__ATOMIC_RELEASE); > > - tbl8_free(lpm, tbl8_group_start); > > + status = tbl8_free(lpm, tbl8_group_start); > > } else if (tbl8_recycle_index > -1) { > > /* Update tbl24 entry. */ > > struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1113,10 > +1120,10 > > @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, > > __atomic_store(&lpm->tbl24[tbl24_index], > &new_tbl24_entry, > > __ATOMIC_RELAXED); > > __atomic_thread_fence(__ATOMIC_RELEASE); > > - tbl8_free(lpm, tbl8_group_start); > > + status = tbl8_free(lpm, tbl8_group_start); > > } > > #undef group_idx > > - return 0; > > + return status; > > This will change rte_lpm_delete API. As a suggestion, you can leave it as it > was before ("return 0"), and send separate patch (with "return status)" > which will be targeted to 20.11. > Is the change of API because a variable is returned instead of constant? The patch passed ABI check on Travis: http://mails.dpdk.org/archives/test-report/2020-July/144864.html So I didn't know there is API/ABI issue. Thanks. /Ruifeng > > } > > > > /* > > > > -- > Regards, > Vladimir
Hi Ruifeng, On 18/07/2020 10:22, Ruifeng Wang wrote: > >> -----Original Message----- >> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com> >> Sent: Saturday, July 18, 2020 1:12 AM >> To: Ruifeng Wang <Ruifeng.Wang@arm.com>; Bruce Richardson >> <bruce.richardson@intel.com> >> Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli >> <Honnappa.Nagarahalli@arm.com>; Phil Yang <Phil.Yang@arm.com> >> Subject: Re: [PATCH v2] lpm: fix unchecked return value >> >> Hi Ruifeng, >> > Hi Vladimir, > >> On 16/07/2020 16:49, Ruifeng Wang wrote: >>> Coverity complains about unchecked return value of >> rte_rcu_qsbr_dq_enqueue. >>> By default, defer queue size is big enough to hold all tbl8 groups. >>> When enqueue fails, return error to the user to indicate system issue. >>> >>> Coverity issue: 360832 >>> Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation") >>> >>> Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com> >>> --- >>> v2: >>> Converted return value to conform to LPM API convention. (Vladimir) >>> >>> lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------ >>> 1 file changed, 13 insertions(+), 6 deletions(-) >>> >>> diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index >>> 2db9e16a2..757436f49 100644 >>> --- a/lib/librte_lpm/rte_lpm.c >>> +++ b/lib/librte_lpm/rte_lpm.c >>> @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm) >>> return group_idx; >>> } >>> >>> -static void >>> +static int32_t >>> tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start) >>> { >>> struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; >>> struct __rte_lpm *internal_lpm; >>> + int status; >>> >>> internal_lpm = container_of(lpm, struct __rte_lpm, lpm); >>> if (internal_lpm->v == NULL) { >>> @@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t >> tbl8_group_start) >>> __ATOMIC_RELAXED); >>> } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) { >>> /* Push into QSBR defer queue. 
*/ >>> - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, >>> + status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq, >>> (void *)&tbl8_group_start); >>> + if (status == 1) { >>> + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n"); >>> + return -rte_errno; >>> + } >>> } >>> + >>> + return 0; >>> } >>> >>> static __rte_noinline int32_t >>> @@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t >> ip_masked, >>> #define group_idx next_hop >>> uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, >> tbl8_index, >>> tbl8_range, i; >>> - int32_t tbl8_recycle_index; >>> + int32_t tbl8_recycle_index, status = 0; >>> >>> /* >>> * Calculate the index into tbl24 and range. Note: All depths >>> larger @@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, >> uint32_t ip_masked, >>> */ >>> lpm->tbl24[tbl24_index].valid = 0; >>> __atomic_thread_fence(__ATOMIC_RELEASE); >>> - tbl8_free(lpm, tbl8_group_start); >>> + status = tbl8_free(lpm, tbl8_group_start); >>> } else if (tbl8_recycle_index > -1) { >>> /* Update tbl24 entry. */ >>> struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1113,10 >> +1120,10 >>> @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, >>> __atomic_store(&lpm->tbl24[tbl24_index], >> &new_tbl24_entry, >>> __ATOMIC_RELAXED); >>> __atomic_thread_fence(__ATOMIC_RELEASE); >>> - tbl8_free(lpm, tbl8_group_start); >>> + status = tbl8_free(lpm, tbl8_group_start); >>> } >>> #undef group_idx >>> - return 0; >>> + return status; >> >> This will change rte_lpm_delete API. As a suggestion, you can leave it as it >> was before ("return 0"), and send separate patch (with "return status)" >> which will be targeted to 20.11. >> > > Is the change of API because a variable is returned instead of constant? > The patch passed ABI check on Travis: http://mails.dpdk.org/archives/test-report/2020-July/144864.html > So I didn't know there is API/ABI issue. Because new error status codes are returned. At the moment rte_lpm_delete() returns only -EINVAL. 
After patches it will also return -ENOSPC. The user's code may not handle this returned error status. On the other hand, from documentation about returned value: "0 on success, negative value otherwise", and given the fact that this behavior is only after calling rte_lpm_rcu_qsbr_add(), I think we can accept this patch. Bruce, please correct me. > > Thanks. > /Ruifeng >>> } >>> >>> /* >>> >> >> -- >> Regards, >> Vladimir Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com> -- Regards, Vladimir
On Tue, Jul 21, 2020 at 05:23:02PM +0100, Medvedkin, Vladimir wrote:
> Hi Ruifeng,
>
> On 18/07/2020 10:22, Ruifeng Wang wrote:
> >
> > > -----Original Message-----
> > > From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> > > Sent: Saturday, July 18, 2020 1:12 AM
> > > To: Ruifeng Wang <Ruifeng.Wang@arm.com>; Bruce Richardson
> > > <bruce.richardson@intel.com>
> > > Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> > > <Honnappa.Nagarahalli@arm.com>; Phil Yang <Phil.Yang@arm.com>
> > > Subject: Re: [PATCH v2] lpm: fix unchecked return value
> > >
> > > Hi Ruifeng,
> > >
> > Hi Vladimir,
> >
> > > On 16/07/2020 16:49, Ruifeng Wang wrote:
> > > > Coverity complains about unchecked return value of
> > > rte_rcu_qsbr_dq_enqueue.
> > > > By default, defer queue size is big enough to hold all tbl8 groups.
> > > > When enqueue fails, return error to the user to indicate system issue.
> > > >
> > > > Coverity issue: 360832
> > > > Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")
> > > >
> > > > Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
> > > > ---
> > > > v2:
> > > > Converted return value to conform to LPM API convention. (Vladimir)
> > > >
> > > > lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------
> > > > 1 file changed, 13 insertions(+), 6 deletions(-)
> > > >
> > > > diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index
> > > > 2db9e16a2..757436f49 100644
> > > > --- a/lib/librte_lpm/rte_lpm.c
> > > > +++ b/lib/librte_lpm/rte_lpm.c
> > > > @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
> > > > return group_idx;
> > > > }
> > > >
> > > > -static void
> > > > +static int32_t
> > > > tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
> > > > {
> > > > struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
> > > > struct __rte_lpm *internal_lpm;
> > > > + int status;
> > > >
> > > > internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
> > > > if (internal_lpm->v == NULL) {
> > > > @@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t
> > > tbl8_group_start)
> > > > __ATOMIC_RELAXED);
> > > > } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
> > > > /* Push into QSBR defer queue. */
> > > > - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> > > > + status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> > > > (void *)&tbl8_group_start);
> > > > + if (status == 1) {
> > > > + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
> > > > + return -rte_errno;
> > > > + }
> > > > }
> > > > +
> > > > + return 0;
> > > > }
> > > >
> > > > static __rte_noinline int32_t
> > > > @@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t
> > > ip_masked,
> > > > #define group_idx next_hop
> > > > uint32_t tbl24_index, tbl8_group_index, tbl8_group_start,
> > > tbl8_index,
> > > > tbl8_range, i;
> > > > - int32_t tbl8_recycle_index;
> > > > + int32_t tbl8_recycle_index, status = 0;
> > > >
> > > > /*
> > > > * Calculate the index into tbl24 and range. Note: All depths
> > > > larger @@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm,
> > > uint32_t ip_masked,
> > > > */
> > > > lpm->tbl24[tbl24_index].valid = 0;
> > > > __atomic_thread_fence(__ATOMIC_RELEASE);
> > > > - tbl8_free(lpm, tbl8_group_start);
> > > > + status = tbl8_free(lpm, tbl8_group_start);
> > > > } else if (tbl8_recycle_index > -1) {
> > > > /* Update tbl24 entry. */
> > > > struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1113,10
> > > +1120,10
> > > > @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
> > > > __atomic_store(&lpm->tbl24[tbl24_index],
> > > &new_tbl24_entry,
> > > > __ATOMIC_RELAXED);
> > > > __atomic_thread_fence(__ATOMIC_RELEASE);
> > > > - tbl8_free(lpm, tbl8_group_start);
> > > > + status = tbl8_free(lpm, tbl8_group_start);
> > > > }
> > > > #undef group_idx
> > > > - return 0;
> > > > + return status;
> > >
> > > This will change rte_lpm_delete API. As a suggestion, you can leave it as it
> > > was before ("return 0"), and send separate patch (with "return status)"
> > > which will be targeted to 20.11.
> > >
> >
> > Is the change of API because a variable is returned instead of constant?
> > The patch passed ABI check on Travis: http://mails.dpdk.org/archives/test-report/2020-July/144864.html
> > So I didn't know there is API/ABI issue.
>
>
> Because new error status codes are returned. At the moment rte_lpm_delete()
> > returns only -EINVAL. After patches it will also return -ENOSPC. The user's
> code may not handle this returned error status.
>
> On the other hand, from documentation about returned value:
> "0 on success, negative value otherwise",
> and given the fact that this behavior is only after calling
> rte_lpm_rcu_qsbr_add(), I think we can accept this patch.
> Bruce, please correct me.
>
That sounds reasonable to me. No change in the committed ABI, since the
specific values are not called out.
On Tue, Jul 21, 2020 at 7:16 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Tue, Jul 21, 2020 at 05:23:02PM +0100, Medvedkin, Vladimir wrote:
> > Hi Ruifeng,
> >
> > On 18/07/2020 10:22, Ruifeng Wang wrote:
> > >
> > > > -----Original Message-----
> > > > From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> > > > Sent: Saturday, July 18, 2020 1:12 AM
> > > > To: Ruifeng Wang <Ruifeng.Wang@arm.com>; Bruce Richardson
> > > > <bruce.richardson@intel.com>
> > > > Cc: dev@dpdk.org; nd <nd@arm.com>; Honnappa Nagarahalli
> > > > <Honnappa.Nagarahalli@arm.com>; Phil Yang <Phil.Yang@arm.com>
> > > > Subject: Re: [PATCH v2] lpm: fix unchecked return value
> > > >
> > > > Hi Ruifeng,
> > > >
> > > Hi Vladimir,
> > >
> > > > On 16/07/2020 16:49, Ruifeng Wang wrote:
> > > > > Coverity complains about unchecked return value of
> > > > rte_rcu_qsbr_dq_enqueue.
> > > > > By default, defer queue size is big enough to hold all tbl8 groups.
> > > > > When enqueue fails, return error to the user to indicate system issue.
> > > > >
> > > > > Coverity issue: 360832
> > > > > Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")
> > > > >
> > > > > Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
> > > > > ---
> > > > > v2:
> > > > > Converted return value to conform to LPM API convention. (Vladimir)
> > > > >
> > > > > lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------
> > > > > 1 file changed, 13 insertions(+), 6 deletions(-)
> > > > >
> > > > > diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index
> > > > > 2db9e16a2..757436f49 100644
> > > > > --- a/lib/librte_lpm/rte_lpm.c
> > > > > +++ b/lib/librte_lpm/rte_lpm.c
> > > > > @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
> > > > > return group_idx;
> > > > > }
> > > > >
> > > > > -static void
> > > > > +static int32_t
> > > > > tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
> > > > > {
> > > > > struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
> > > > > struct __rte_lpm *internal_lpm;
> > > > > + int status;
> > > > >
> > > > > internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
> > > > > if (internal_lpm->v == NULL) {
> > > > > @@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t
> > > > tbl8_group_start)
> > > > > __ATOMIC_RELAXED);
> > > > > } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
> > > > > /* Push into QSBR defer queue. */
> > > > > - rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> > > > > + status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> > > > > (void *)&tbl8_group_start);
> > > > > + if (status == 1) {
> > > > > + RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
> > > > > + return -rte_errno;
> > > > > + }
> > > > > }
> > > > > +
> > > > > + return 0;
> > > > > }
> > > > >
> > > > > static __rte_noinline int32_t
> > > > > @@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t
> > > > ip_masked,
> > > > > #define group_idx next_hop
> > > > > uint32_t tbl24_index, tbl8_group_index, tbl8_group_start,
> > > > tbl8_index,
> > > > > tbl8_range, i;
> > > > > - int32_t tbl8_recycle_index;
> > > > > + int32_t tbl8_recycle_index, status = 0;
> > > > >
> > > > > /*
> > > > > * Calculate the index into tbl24 and range. Note: All depths
> > > > > larger @@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm,
> > > > uint32_t ip_masked,
> > > > > */
> > > > > lpm->tbl24[tbl24_index].valid = 0;
> > > > > __atomic_thread_fence(__ATOMIC_RELEASE);
> > > > > - tbl8_free(lpm, tbl8_group_start);
> > > > > + status = tbl8_free(lpm, tbl8_group_start);
> > > > > } else if (tbl8_recycle_index > -1) {
> > > > > /* Update tbl24 entry. */
> > > > > struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1113,10
> > > > +1120,10
> > > > > @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
> > > > > __atomic_store(&lpm->tbl24[tbl24_index],
> > > > &new_tbl24_entry,
> > > > > __ATOMIC_RELAXED);
> > > > > __atomic_thread_fence(__ATOMIC_RELEASE);
> > > > > - tbl8_free(lpm, tbl8_group_start);
> > > > > + status = tbl8_free(lpm, tbl8_group_start);
> > > > > }
> > > > > #undef group_idx
> > > > > - return 0;
> > > > > + return status;
> > > >
> > > > This will change rte_lpm_delete API. As a suggestion, you can leave it as it
> > > > was before ("return 0"), and send separate patch (with "return status)"
> > > > which will be targeted to 20.11.
> > > >
> > >
> > > Is the change of API because a variable is returned instead of constant?
> > > The patch passed ABI check on Travis: http://mails.dpdk.org/archives/test-report/2020-July/144864.html
> > > So I didn't know there is API/ABI issue.
> >
> >
> > Because new error status codes are returned. At the moment rte_lpm_delete()
> > returns only -EINVAL. After patches it will also return -ENOSPC. The user's
> > code may not handle this returned error status.
> >
> > On the other hand, from documentation about returned value:
> > "0 on success, negative value otherwise",
> > and given the fact that this behavior is only after calling
> > rte_lpm_rcu_qsbr_add(), I think we can accept this patch.
> > Bruce, please correct me.
> >
> That sounds reasonable to me. No change in the committed ABI, since the
> specific values are not called out.
>
I will take this as a second ack and merge this fix for rc2.
Thanks.
--
David Marchand
On Thu, Jul 16, 2020 at 5:49 PM Ruifeng Wang <ruifeng.wang@arm.com> wrote:
>
> Coverity complains about unchecked return value of rte_rcu_qsbr_dq_enqueue.
> By default, defer queue size is big enough to hold all tbl8 groups. When
> enqueue fails, return error to the user to indicate system issue.
>
> Coverity issue: 360832
> Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")
>
> Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Applied, thanks.
--
David Marchand