These methods were introduced in 20.05. There has been no changes in their public API since then. They seem mature enough to remove the experimental tag. Signed-off-by: Sean Morrissey <sean.morrissey@intel.com> --- lib/ring/rte_ring_hts.h | 9 --------- lib/ring/rte_ring_peek.h | 13 ------------- lib/ring/rte_ring_peek_zc.h | 13 ------------- lib/ring/rte_ring_rts.h | 13 ------------- 4 files changed, 48 deletions(-) diff --git a/lib/ring/rte_ring_hts.h b/lib/ring/rte_ring_hts.h index a9342083f4..9a5938ac58 100644 --- a/lib/ring/rte_ring_hts.h +++ b/lib/ring/rte_ring_hts.h @@ -12,7 +12,6 @@ /** * @file rte_ring_hts.h - * @b EXPERIMENTAL: this API may change without prior notice * It is not recommended to include this file directly. * Please include <rte_ring.h> instead. * @@ -50,7 +49,6 @@ extern "C" { * @return * The number of objects enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_hts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, unsigned int *free_space) @@ -78,7 +76,6 @@ rte_ring_mp_hts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, * @return * The number of objects dequeued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_hts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -106,7 +103,6 @@ rte_ring_mc_hts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, * @return * - n: Actual number of objects enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_hts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, unsigned int *free_space) @@ -136,7 +132,6 @@ rte_ring_mp_hts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, * @return * - n: Actual number of objects dequeued, 0 if ring is empty */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_hts_dequeue_burst_elem(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -160,7 +155,6 @@ rte_ring_mc_hts_dequeue_burst_elem(struct rte_ring *r, void *obj_table, * @return * The number of objects enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_hts_enqueue_bulk(struct rte_ring *r, void * const *obj_table, unsigned int n, unsigned int *free_space) @@ -184,7 +178,6 @@ rte_ring_mp_hts_enqueue_bulk(struct rte_ring *r, void * const *obj_table, * @return * The number of objects dequeued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_hts_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -208,7 +201,6 @@ rte_ring_mc_hts_dequeue_bulk(struct rte_ring *r, void **obj_table, * @return * - n: Actual number of objects enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_hts_enqueue_burst(struct rte_ring *r, void * const *obj_table, unsigned int n, unsigned int *free_space) @@ -234,7 +226,6 @@ rte_ring_mp_hts_enqueue_burst(struct rte_ring *r, void * const *obj_table, * @return * - n: Actual number of objects dequeued, 0 if ring is empty */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_hts_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) diff --git a/lib/ring/rte_ring_peek.h b/lib/ring/rte_ring_peek.h index 3f8f6fc1c0..c0621d12e2 100644 --- a/lib/ring/rte_ring_peek.h +++ b/lib/ring/rte_ring_peek.h @@ -12,7 +12,6 @@ /** * @file - * @b EXPERIMENTAL: this API may change without prior notice * It is not recommended to include this file directly. * Please include <rte_ring_elem.h> instead. * @@ -67,7 +66,6 @@ extern "C" { * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n, unsigned int *free_space) @@ -93,7 +91,6 @@ rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n, * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n, unsigned int *free_space) @@ -118,7 +115,6 @@ rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n, * @return * Actual number of objects that can be enqueued. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n, unsigned int *free_space) @@ -144,7 +140,6 @@ rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n, * @return * Actual number of objects that can be enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n, unsigned int *free_space) @@ -168,7 +163,6 @@ rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n, * @param n * The number of objects to add to the ring from the obj_table. */ -__rte_experimental static __rte_always_inline void rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n) @@ -208,7 +202,6 @@ rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table, * @param n * The number of objects to add to the ring from the obj_table. */ -__rte_experimental static __rte_always_inline void rte_ring_enqueue_finish(struct rte_ring *r, void * const *obj_table, unsigned int n) @@ -237,7 +230,6 @@ rte_ring_enqueue_finish(struct rte_ring *r, void * const *obj_table, * @return * The number of objects dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -263,7 +255,6 @@ rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table, * @return * Actual number of objects dequeued. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -293,7 +284,6 @@ rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table, * @return * The actual number of objects dequeued. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -319,7 +309,6 @@ rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table, * @return * The actual number of objects dequeued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -338,7 +327,6 @@ rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table, * @param n * The number of objects to remove from the ring. */ -__rte_experimental static __rte_always_inline void rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n) { @@ -371,7 +359,6 @@ rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n) * @param n * The number of objects to remove from the ring. */ -__rte_experimental static __rte_always_inline void rte_ring_dequeue_finish(struct rte_ring *r, unsigned int n) { diff --git a/lib/ring/rte_ring_peek_zc.h b/lib/ring/rte_ring_peek_zc.h index be677a3e1f..8fb279c37e 100644 --- a/lib/ring/rte_ring_peek_zc.h +++ b/lib/ring/rte_ring_peek_zc.h @@ -12,7 +12,6 @@ /** * @file - * @b EXPERIMENTAL: this API may change without prior notice * It is not recommended to include this file directly. * Please include <rte_ring_elem.h> instead. 
* @@ -177,7 +176,6 @@ __rte_ring_do_enqueue_zc_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space) @@ -208,7 +206,6 @@ rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space) @@ -240,7 +237,6 @@ rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n, * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space) @@ -271,7 +267,6 @@ rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be enqueued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space) @@ -290,7 +285,6 @@ rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n, * @param n * The number of objects to add to the ring. */ -__rte_experimental static __rte_always_inline void rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n) { @@ -323,7 +317,6 @@ rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n) * @param n * The number of pointers to objects to add to the ring. 
*/ -__rte_experimental static __rte_always_inline void rte_ring_enqueue_zc_finish(struct rte_ring *r, unsigned int n) { @@ -390,7 +383,6 @@ __rte_ring_do_dequeue_zc_elem_start(struct rte_ring *r, * @return * The number of objects that can be dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available) @@ -420,7 +412,6 @@ rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available) @@ -453,7 +444,6 @@ rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n, * @return * The number of objects that can be dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available) @@ -483,7 +473,6 @@ rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available) @@ -502,7 +491,6 @@ rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n, * @param n * The number of objects to remove from the ring. */ -__rte_experimental static __rte_always_inline void rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n) { @@ -535,7 +523,6 @@ rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n) * @param n * The number of objects to remove from the ring. 
*/ -__rte_experimental static __rte_always_inline void rte_ring_dequeue_zc_finish(struct rte_ring *r, unsigned int n) { diff --git a/lib/ring/rte_ring_rts.h b/lib/ring/rte_ring_rts.h index 9570aec8f4..50fc8f74db 100644 --- a/lib/ring/rte_ring_rts.h +++ b/lib/ring/rte_ring_rts.h @@ -12,7 +12,6 @@ /** * @file rte_ring_rts.h - * @b EXPERIMENTAL: this API may change without prior notice * It is not recommended to include this file directly. * Please include <rte_ring.h> instead. * @@ -77,7 +76,6 @@ extern "C" { * @return * The number of objects enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, unsigned int *free_space) @@ -105,7 +103,6 @@ rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, * @return * The number of objects dequeued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -133,7 +130,6 @@ rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, * @return * - n: Actual number of objects enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, unsigned int *free_space) @@ -163,7 +159,6 @@ rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, * @return * - n: Actual number of objects dequeued, 0 if ring is empty */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -187,7 +182,6 @@ rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table, * @return * The number of objects enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_rts_enqueue_bulk(struct rte_ring *r, void * const *obj_table, unsigned int n, unsigned int *free_space) @@ -211,7 +205,6 @@ rte_ring_mp_rts_enqueue_bulk(struct rte_ring *r, void * const *obj_table, * @return * The number of objects dequeued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -235,7 +228,6 @@ rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table, * @return * - n: Actual number of objects enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_rts_enqueue_burst(struct rte_ring *r, void * const *obj_table, unsigned int n, unsigned int *free_space) @@ -261,7 +253,6 @@ rte_ring_mp_rts_enqueue_burst(struct rte_ring *r, void * const *obj_table, * @return * - n: Actual number of objects dequeued, 0 if ring is empty */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -279,7 +270,6 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table, * Producer HTD value, if producer is set in appropriate sync mode, * or UINT32_MAX otherwise. */ -__rte_experimental static inline uint32_t rte_ring_get_prod_htd_max(const struct rte_ring *r) { @@ -299,7 +289,6 @@ rte_ring_get_prod_htd_max(const struct rte_ring *r) * @return * Zero on success, or negative error code otherwise. */ -__rte_experimental static inline int rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v) { @@ -319,7 +308,6 @@ rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v) * Consumer HTD value, if consumer is set in appropriate sync mode, * or UINT32_MAX otherwise. */ -__rte_experimental static inline uint32_t rte_ring_get_cons_htd_max(const struct rte_ring *r) { @@ -339,7 +327,6 @@ rte_ring_get_cons_htd_max(const struct rte_ring *r) * @return * Zero on success, or negative error code otherwise. */ -__rte_experimental static inline int rte_ring_set_cons_htd_max(struct rte_ring *r, uint32_t v) { -- 2.25.1
> > These methods were introduced in 20.05. > There has been no changes in their public API since then. > They seem mature enough to remove the experimental tag. > > Signed-off-by: Sean Morrissey <sean.morrissey@intel.com> > --- Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > -- > 2.25.1
Hello,
On Thu, Sep 30, 2021 at 7:42 PM Sean Morrissey <sean.morrissey@intel.com> wrote:
>
> These methods were introduced in 20.05.
> There have been no changes in their public API since then.
> They seem mature enough to remove the experimental tag.
I am a bit skeptical about this patch.
Can you double-check?
/** prod/cons sync types */
enum rte_ring_sync_type {
RTE_RING_SYNC_MT, /**< multi-thread safe (default mode) */
RTE_RING_SYNC_ST, /**< single thread only */
#ifdef ALLOW_EXPERIMENTAL_API
RTE_RING_SYNC_MT_RTS, /**< multi-thread relaxed tail sync */
RTE_RING_SYNC_MT_HTS, /**< multi-thread head/tail sync */
#endif
};
There is also in rte_ring_elem.h:
#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif
And later
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
unsigned int esize, unsigned int n, unsigned int *free_space)
{
...
#ifdef ALLOW_EXPERIMENTAL_API
case RTE_RING_SYNC_MT_RTS:
return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
free_space);
I don't think those HTS and RTS modes work if the code is compiled
without the experimental flag.
--
David Marchand
Hi David,
> Hello,
>
> On Thu, Sep 30, 2021 at 7:42 PM Sean Morrissey <sean.morrissey@intel.com> wrote:
> >
> > These methods were introduced in 20.05.
> > There have been no changes in their public API since then.
> > They seem mature enough to remove the experimental tag.
>
> I am a bit skeptical about this patch.
> Can you double-check?
>
>
> /** prod/cons sync types */
> enum rte_ring_sync_type {
> RTE_RING_SYNC_MT, /**< multi-thread safe (default mode) */
> RTE_RING_SYNC_ST, /**< single thread only */
> #ifdef ALLOW_EXPERIMENTAL_API
> RTE_RING_SYNC_MT_RTS, /**< multi-thread relaxed tail sync */
> RTE_RING_SYNC_MT_HTS, /**< multi-thread head/tail sync */
> #endif
> };
>
> There is also in rte_ring_elem.h:
>
> #ifdef ALLOW_EXPERIMENTAL_API
> #include <rte_ring_hts.h>
> #include <rte_ring_rts.h>
> #endif
>
> And later
>
> static __rte_always_inline unsigned int
> rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
> unsigned int esize, unsigned int n, unsigned int *free_space)
> {
> ...
> #ifdef ALLOW_EXPERIMENTAL_API
> case RTE_RING_SYNC_MT_RTS:
> return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
> free_space);
>
>
> I don't think those HTS and RTS modes work if the code is compiled
> without the experimental flag.
>
You are right — sorry, I forgot about these things ☹
Will change in v2.
These methods were introduced in 20.05. There has been no changes in their public API since then. They seem mature enough to remove the experimental tag. Signed-off-by: Sean Morrissey <sean.morrissey@intel.com> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com> --- lib/ring/rte_ring_core.h | 2 -- lib/ring/rte_ring_elem.h | 12 ------------ lib/ring/rte_ring_hts.h | 9 --------- lib/ring/rte_ring_peek.h | 13 ------------- lib/ring/rte_ring_peek_zc.h | 13 ------------- lib/ring/rte_ring_rts.h | 13 ------------- 6 files changed, 62 deletions(-) diff --git a/lib/ring/rte_ring_core.h b/lib/ring/rte_ring_core.h index 16718ca7f1..4f80c91b72 100644 --- a/lib/ring/rte_ring_core.h +++ b/lib/ring/rte_ring_core.h @@ -57,10 +57,8 @@ enum rte_ring_queue_behavior { enum rte_ring_sync_type { RTE_RING_SYNC_MT, /**< multi-thread safe (default mode) */ RTE_RING_SYNC_ST, /**< single thread only */ -#ifdef ALLOW_EXPERIMENTAL_API RTE_RING_SYNC_MT_RTS, /**< multi-thread relaxed tail sync */ RTE_RING_SYNC_MT_HTS, /**< multi-thread head/tail sync */ -#endif }; /** diff --git a/lib/ring/rte_ring_elem.h b/lib/ring/rte_ring_elem.h index 98c5495e02..4bd016c110 100644 --- a/lib/ring/rte_ring_elem.h +++ b/lib/ring/rte_ring_elem.h @@ -165,10 +165,8 @@ rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space); } -#ifdef ALLOW_EXPERIMENTAL_API #include <rte_ring_hts.h> #include <rte_ring_rts.h> -#endif /** * Enqueue several objects on a ring. 
@@ -204,14 +202,12 @@ rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, case RTE_RING_SYNC_ST: return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -#ifdef ALLOW_EXPERIMENTAL_API case RTE_RING_SYNC_MT_RTS: return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); case RTE_RING_SYNC_MT_HTS: return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space); -#endif } /* valid ring should never reach this point */ @@ -388,14 +384,12 @@ rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, case RTE_RING_SYNC_ST: return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available); -#ifdef ALLOW_EXPERIMENTAL_API case RTE_RING_SYNC_MT_RTS: return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available); case RTE_RING_SYNC_MT_HTS: return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available); -#endif } /* valid ring should never reach this point */ @@ -576,14 +570,12 @@ rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, case RTE_RING_SYNC_ST: return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space); -#ifdef ALLOW_EXPERIMENTAL_API case RTE_RING_SYNC_MT_RTS: return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space); case RTE_RING_SYNC_MT_HTS: return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space); -#endif } /* valid ring should never reach this point */ @@ -688,14 +680,12 @@ rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table, case RTE_RING_SYNC_ST: return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available); -#ifdef ALLOW_EXPERIMENTAL_API case RTE_RING_SYNC_MT_RTS: return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available); case RTE_RING_SYNC_MT_HTS: return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available); -#endif } /* valid ring should never reach this point */ @@ -705,10 +695,8 @@ rte_ring_dequeue_burst_elem(struct 
rte_ring *r, void *obj_table, return 0; } -#ifdef ALLOW_EXPERIMENTAL_API #include <rte_ring_peek.h> #include <rte_ring_peek_zc.h> -#endif #include <rte_ring.h> diff --git a/lib/ring/rte_ring_hts.h b/lib/ring/rte_ring_hts.h index a9342083f4..9a5938ac58 100644 --- a/lib/ring/rte_ring_hts.h +++ b/lib/ring/rte_ring_hts.h @@ -12,7 +12,6 @@ /** * @file rte_ring_hts.h - * @b EXPERIMENTAL: this API may change without prior notice * It is not recommended to include this file directly. * Please include <rte_ring.h> instead. * @@ -50,7 +49,6 @@ extern "C" { * @return * The number of objects enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_hts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, unsigned int *free_space) @@ -78,7 +76,6 @@ rte_ring_mp_hts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, * @return * The number of objects dequeued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_hts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -106,7 +103,6 @@ rte_ring_mc_hts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, * @return * - n: Actual number of objects enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_hts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, unsigned int *free_space) @@ -136,7 +132,6 @@ rte_ring_mp_hts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, * @return * - n: Actual number of objects dequeued, 0 if ring is empty */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_hts_dequeue_burst_elem(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -160,7 +155,6 @@ rte_ring_mc_hts_dequeue_burst_elem(struct rte_ring *r, void *obj_table, * @return * The number of objects enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_hts_enqueue_bulk(struct rte_ring *r, void * const *obj_table, unsigned int n, unsigned int *free_space) @@ -184,7 +178,6 @@ rte_ring_mp_hts_enqueue_bulk(struct rte_ring *r, void * const *obj_table, * @return * The number of objects dequeued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_hts_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -208,7 +201,6 @@ rte_ring_mc_hts_dequeue_bulk(struct rte_ring *r, void **obj_table, * @return * - n: Actual number of objects enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_hts_enqueue_burst(struct rte_ring *r, void * const *obj_table, unsigned int n, unsigned int *free_space) @@ -234,7 +226,6 @@ rte_ring_mp_hts_enqueue_burst(struct rte_ring *r, void * const *obj_table, * @return * - n: Actual number of objects dequeued, 0 if ring is empty */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_hts_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) diff --git a/lib/ring/rte_ring_peek.h b/lib/ring/rte_ring_peek.h index 3f8f6fc1c0..c0621d12e2 100644 --- a/lib/ring/rte_ring_peek.h +++ b/lib/ring/rte_ring_peek.h @@ -12,7 +12,6 @@ /** * @file - * @b EXPERIMENTAL: this API may change without prior notice * It is not recommended to include this file directly. * Please include <rte_ring_elem.h> instead. * @@ -67,7 +66,6 @@ extern "C" { * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n, unsigned int *free_space) @@ -93,7 +91,6 @@ rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n, * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n, unsigned int *free_space) @@ -118,7 +115,6 @@ rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n, * @return * Actual number of objects that can be enqueued. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n, unsigned int *free_space) @@ -144,7 +140,6 @@ rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n, * @return * Actual number of objects that can be enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n, unsigned int *free_space) @@ -168,7 +163,6 @@ rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n, * @param n * The number of objects to add to the ring from the obj_table. */ -__rte_experimental static __rte_always_inline void rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n) @@ -208,7 +202,6 @@ rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table, * @param n * The number of objects to add to the ring from the obj_table. */ -__rte_experimental static __rte_always_inline void rte_ring_enqueue_finish(struct rte_ring *r, void * const *obj_table, unsigned int n) @@ -237,7 +230,6 @@ rte_ring_enqueue_finish(struct rte_ring *r, void * const *obj_table, * @return * The number of objects dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -263,7 +255,6 @@ rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table, * @return * Actual number of objects dequeued. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -293,7 +284,6 @@ rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table, * @return * The actual number of objects dequeued. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -319,7 +309,6 @@ rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table, * @return * The actual number of objects dequeued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -338,7 +327,6 @@ rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table, * @param n * The number of objects to remove from the ring. */ -__rte_experimental static __rte_always_inline void rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n) { @@ -371,7 +359,6 @@ rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n) * @param n * The number of objects to remove from the ring. */ -__rte_experimental static __rte_always_inline void rte_ring_dequeue_finish(struct rte_ring *r, unsigned int n) { diff --git a/lib/ring/rte_ring_peek_zc.h b/lib/ring/rte_ring_peek_zc.h index be677a3e1f..8fb279c37e 100644 --- a/lib/ring/rte_ring_peek_zc.h +++ b/lib/ring/rte_ring_peek_zc.h @@ -12,7 +12,6 @@ /** * @file - * @b EXPERIMENTAL: this API may change without prior notice * It is not recommended to include this file directly. * Please include <rte_ring_elem.h> instead. 
* @@ -177,7 +176,6 @@ __rte_ring_do_enqueue_zc_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space) @@ -208,7 +206,6 @@ rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space) @@ -240,7 +237,6 @@ rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n, * @return * The number of objects that can be enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space) @@ -271,7 +267,6 @@ rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be enqueued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space) @@ -290,7 +285,6 @@ rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n, * @param n * The number of objects to add to the ring. */ -__rte_experimental static __rte_always_inline void rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n) { @@ -323,7 +317,6 @@ rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n) * @param n * The number of pointers to objects to add to the ring. 
*/ -__rte_experimental static __rte_always_inline void rte_ring_enqueue_zc_finish(struct rte_ring *r, unsigned int n) { @@ -390,7 +383,6 @@ __rte_ring_do_dequeue_zc_elem_start(struct rte_ring *r, * @return * The number of objects that can be dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available) @@ -420,7 +412,6 @@ rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available) @@ -453,7 +444,6 @@ rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n, * @return * The number of objects that can be dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available) @@ -483,7 +473,6 @@ rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize, * @return * The number of objects that can be dequeued, either 0 or n. */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available) @@ -502,7 +491,6 @@ rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n, * @param n * The number of objects to remove from the ring. */ -__rte_experimental static __rte_always_inline void rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n) { @@ -535,7 +523,6 @@ rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n) * @param n * The number of objects to remove from the ring. 
*/ -__rte_experimental static __rte_always_inline void rte_ring_dequeue_zc_finish(struct rte_ring *r, unsigned int n) { diff --git a/lib/ring/rte_ring_rts.h b/lib/ring/rte_ring_rts.h index 9570aec8f4..50fc8f74db 100644 --- a/lib/ring/rte_ring_rts.h +++ b/lib/ring/rte_ring_rts.h @@ -12,7 +12,6 @@ /** * @file rte_ring_rts.h - * @b EXPERIMENTAL: this API may change without prior notice * It is not recommended to include this file directly. * Please include <rte_ring.h> instead. * @@ -77,7 +76,6 @@ extern "C" { * @return * The number of objects enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, unsigned int *free_space) @@ -105,7 +103,6 @@ rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table, * @return * The number of objects dequeued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -133,7 +130,6 @@ rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table, * @return * - n: Actual number of objects enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n, unsigned int *free_space) @@ -163,7 +159,6 @@ rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table, * @return * - n: Actual number of objects dequeued, 0 if ring is empty */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available) @@ -187,7 +182,6 @@ rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table, * @return * The number of objects enqueued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_rts_enqueue_bulk(struct rte_ring *r, void * const *obj_table, unsigned int n, unsigned int *free_space) @@ -211,7 +205,6 @@ rte_ring_mp_rts_enqueue_bulk(struct rte_ring *r, void * const *obj_table, * @return * The number of objects dequeued, either 0 or n */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -235,7 +228,6 @@ rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table, * @return * - n: Actual number of objects enqueued. 
*/ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mp_rts_enqueue_burst(struct rte_ring *r, void * const *obj_table, unsigned int n, unsigned int *free_space) @@ -261,7 +253,6 @@ rte_ring_mp_rts_enqueue_burst(struct rte_ring *r, void * const *obj_table, * @return * - n: Actual number of objects dequeued, 0 if ring is empty */ -__rte_experimental static __rte_always_inline unsigned int rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available) @@ -279,7 +270,6 @@ rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table, * Producer HTD value, if producer is set in appropriate sync mode, * or UINT32_MAX otherwise. */ -__rte_experimental static inline uint32_t rte_ring_get_prod_htd_max(const struct rte_ring *r) { @@ -299,7 +289,6 @@ rte_ring_get_prod_htd_max(const struct rte_ring *r) * @return * Zero on success, or negative error code otherwise. */ -__rte_experimental static inline int rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v) { @@ -319,7 +308,6 @@ rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v) * Consumer HTD value, if consumer is set in appropriate sync mode, * or UINT32_MAX otherwise. */ -__rte_experimental static inline uint32_t rte_ring_get_cons_htd_max(const struct rte_ring *r) { @@ -339,7 +327,6 @@ rte_ring_get_cons_htd_max(const struct rte_ring *r) * @return * Zero on success, or negative error code otherwise. */ -__rte_experimental static inline int rte_ring_set_cons_htd_max(struct rte_ring *r, uint32_t v) { -- 2.25.1
On Mon, Oct 4, 2021 at 11:22 AM Sean Morrissey <sean.morrissey@intel.com> wrote:
>
> These methods were introduced in 20.05.
> There have been no changes in their public API since then.
> They seem mature enough to remove the experimental tag.
>
> Signed-off-by: Sean Morrissey <sean.morrissey@intel.com>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
For future patches Sean, we don't prefix patches with lib/ in the title.
Applied, thanks.
--
David Marchand