Two new sync modes were introduced into rte_ring: relaxed tail sync (RTS) and head/tail sync (HTS). This change provides user with ability to select these modes for ring based mempool via mempool ops API. Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> --- drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- 1 file changed, 88 insertions(+), 9 deletions(-) diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c index bc123fc52..15ec7dee7 100644 --- a/drivers/mempool/ring/rte_mempool_ring.c +++ b/drivers/mempool/ring/rte_mempool_ring.c @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + static int common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) { @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; +} + static unsigned common_ring_get_count(const struct rte_mempool *mp) { return rte_ring_count(mp->pool_data); } - static int -common_ring_alloc(struct rte_mempool *mp) +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) { - int rg_flags = 0, ret; + int ret; char rg_name[RTE_RING_NAMESIZE]; struct rte_ring *r; @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) return -rte_errno; } - /* ring flags */ - if (mp->flags & MEMPOOL_F_SP_PUT) - rg_flags |= RING_F_SP_ENQ; - if (mp->flags & MEMPOOL_F_SC_GET) - rg_flags |= RING_F_SC_DEQ; - /* * Allocate the ring that will be used to store objects. * Ring functions will return appropriate errors if we are @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) return 0; } +static int +common_ring_alloc(struct rte_mempool *mp) +{ + uint32_t rg_flags; + + rg_flags = 0; + + /* ring flags */ + if (mp->flags & MEMPOOL_F_SP_PUT) + rg_flags |= RING_F_SP_ENQ; + if (mp->flags & MEMPOOL_F_SC_GET) + rg_flags |= RING_F_SC_DEQ; + + return ring_alloc(mp, rg_flags); +} + +static int +rts_ring_alloc(struct rte_mempool *mp) +{ + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) + return -EINVAL; + + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); +} + +static int +hts_ring_alloc(struct rte_mempool *mp) +{ + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) + return -EINVAL; + + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); +} + static void common_ring_free(struct rte_mempool *mp) { @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = { .get_count = common_ring_get_count, }; +/* ops for mempool with ring in MT_RTS sync mode */ +static const struct rte_mempool_ops ops_mt_rts = { + .name = "ring_mt_rts", + .alloc = rts_ring_alloc, + .free = common_ring_free, + .enqueue = rts_ring_mp_enqueue, + .dequeue = rts_ring_mc_dequeue, + .get_count = common_ring_get_count, +}; + +/* ops for mempool with ring in MT_HTS sync mode */ +static const struct 
rte_mempool_ops ops_mt_hts = { + .name = "ring_mt_hts", + .alloc = hts_ring_alloc, + .free = common_ring_free, + .enqueue = hts_ring_mp_enqueue, + .dequeue = hts_ring_mc_dequeue, + .get_count = common_ring_get_count, +}; + MEMPOOL_REGISTER_OPS(ops_mp_mc); MEMPOOL_REGISTER_OPS(ops_sp_sc); MEMPOOL_REGISTER_OPS(ops_mp_sc); MEMPOOL_REGISTER_OPS(ops_sp_mc); +MEMPOOL_REGISTER_OPS(ops_mt_rts); +MEMPOOL_REGISTER_OPS(ops_mt_hts); -- 2.17.1
v2: - update Release Notes (as per comments) Two new sync modes were introduced into rte_ring: relaxed tail sync (RTS) and head/tail sync (HTS). This change provides user with ability to select these modes for ring based mempool via mempool ops API. Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> Acked-by: Gage Eads <gage.eads@intel.com> --- doc/guides/rel_notes/release_20_08.rst | 6 ++ drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- 2 files changed, 94 insertions(+), 9 deletions(-) diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst index eaaf11c37..7bdcf3aac 100644 --- a/doc/guides/rel_notes/release_20_08.rst +++ b/doc/guides/rel_notes/release_20_08.rst @@ -84,6 +84,12 @@ New Features * Dump ``rte_flow`` memory consumption. * Measure packet per second forwarding. +* **Added support for new sync modes into mempool ring driver.** + + Added ability to select new ring synchronisation modes: + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` + via mempool ops API. + Removed Items ------------- diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c index bc123fc52..15ec7dee7 100644 --- a/drivers/mempool/ring/rte_mempool_ring.c +++ b/drivers/mempool/ring/rte_mempool_ring.c @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; +} + static int common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) { @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + static unsigned common_ring_get_count(const struct rte_mempool *mp) { return rte_ring_count(mp->pool_data); } - static int -common_ring_alloc(struct rte_mempool *mp) +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) { - int rg_flags = 0, ret; + int ret; char rg_name[RTE_RING_NAMESIZE]; struct rte_ring *r; @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) return -rte_errno; } - /* ring flags */ - if (mp->flags & MEMPOOL_F_SP_PUT) - rg_flags |= RING_F_SP_ENQ; - if (mp->flags & MEMPOOL_F_SC_GET) - rg_flags |= RING_F_SC_DEQ; - /* * Allocate the ring that will be used to store objects. 
* Ring functions will return appropriate errors if we are @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) return 0; } +static int +common_ring_alloc(struct rte_mempool *mp) +{ + uint32_t rg_flags; + + rg_flags = 0; + + /* ring flags */ + if (mp->flags & MEMPOOL_F_SP_PUT) + rg_flags |= RING_F_SP_ENQ; + if (mp->flags & MEMPOOL_F_SC_GET) + rg_flags |= RING_F_SC_DEQ; + + return ring_alloc(mp, rg_flags); +} + +static int +rts_ring_alloc(struct rte_mempool *mp) +{ + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) + return -EINVAL; + + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); +} + +static int +hts_ring_alloc(struct rte_mempool *mp) +{ + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) + return -EINVAL; + + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); +} + static void common_ring_free(struct rte_mempool *mp) { @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = { .get_count = common_ring_get_count, }; +/* ops for mempool with ring in MT_RTS sync mode */ +static const struct rte_mempool_ops ops_mt_rts = { + .name = "ring_mt_rts", + .alloc = rts_ring_alloc, + .free = common_ring_free, + .enqueue = rts_ring_mp_enqueue, + .dequeue = rts_ring_mc_dequeue, + .get_count = common_ring_get_count, +}; + +/* ops for mempool with ring in MT_HTS sync mode */ +static const struct rte_mempool_ops ops_mt_hts = { + .name = "ring_mt_hts", + .alloc = hts_ring_alloc, + .free = common_ring_free, + .enqueue = hts_ring_mp_enqueue, + .dequeue = hts_ring_mc_dequeue, + .get_count = common_ring_get_count, +}; + MEMPOOL_REGISTER_OPS(ops_mp_mc); MEMPOOL_REGISTER_OPS(ops_sp_sc); MEMPOOL_REGISTER_OPS(ops_mp_sc); MEMPOOL_REGISTER_OPS(ops_sp_mc); +MEMPOOL_REGISTER_OPS(ops_mt_rts); +MEMPOOL_REGISTER_OPS(ops_mt_hts); -- 2.17.1
Hi Konstantin, On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote: > v2: > - update Release Notes (as per comments) > > Two new sync modes were introduced into rte_ring: > relaxed tail sync (RTS) and head/tail sync (HTS). > This change provides user with ability to select these > modes for ring based mempool via mempool ops API. > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > Acked-by: Gage Eads <gage.eads@intel.com> > --- > doc/guides/rel_notes/release_20_08.rst | 6 ++ > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- > 2 files changed, 94 insertions(+), 9 deletions(-) > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst > index eaaf11c37..7bdcf3aac 100644 > --- a/doc/guides/rel_notes/release_20_08.rst > +++ b/doc/guides/rel_notes/release_20_08.rst > @@ -84,6 +84,12 @@ New Features > * Dump ``rte_flow`` memory consumption. > * Measure packet per second forwarding. > > +* **Added support for new sync modes into mempool ring driver.** > + > + Added ability to select new ring synchronisation modes: > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > + via mempool ops API. > + > > Removed Items > ------------- > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c > index bc123fc52..15ec7dee7 100644 > --- a/drivers/mempool/ring/rte_mempool_ring.c > +++ b/drivers/mempool/ring/rte_mempool_ring.c > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > } > > +static int > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > + unsigned int n) > +{ > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, > + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > +} > + > +static int > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > + unsigned int n) > +{ > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > +} > + > static int > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > { > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > } > > +static int > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > +{ > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > +} > + > +static int > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > +{ > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > +} > + > static unsigned > common_ring_get_count(const struct rte_mempool *mp) > { > return rte_ring_count(mp->pool_data); > } > > - > static int > -common_ring_alloc(struct rte_mempool *mp) > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) > { > - int rg_flags = 0, ret; > + int ret; > char rg_name[RTE_RING_NAMESIZE]; > struct rte_ring *r; > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) > return -rte_errno; > } > > - /* ring flags */ > - if (mp->flags & MEMPOOL_F_SP_PUT) > - rg_flags |= RING_F_SP_ENQ; > - if (mp->flags & MEMPOOL_F_SC_GET) > - rg_flags |= RING_F_SC_DEQ; > - > /* > * Allocate the ring that will be used to store objects. 
> * Ring functions will return appropriate errors if we are > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) > return 0; > } > > +static int > +common_ring_alloc(struct rte_mempool *mp) > +{ > + uint32_t rg_flags; > + > + rg_flags = 0; Maybe it could go on the same line > + > + /* ring flags */ Not sure we need to keep this comment > + if (mp->flags & MEMPOOL_F_SP_PUT) > + rg_flags |= RING_F_SP_ENQ; > + if (mp->flags & MEMPOOL_F_SC_GET) > + rg_flags |= RING_F_SC_DEQ; > + > + return ring_alloc(mp, rg_flags); > +} > + > +static int > +rts_ring_alloc(struct rte_mempool *mp) > +{ > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > + return -EINVAL; Why do we need this? It is a problem to allow sc/sp in this mode (even if it's not optimal)? > + > + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); > +} > + > +static int > +hts_ring_alloc(struct rte_mempool *mp) > +{ > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > + return -EINVAL; > + > + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); > +} > + > static void > common_ring_free(struct rte_mempool *mp) > { > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = { > .get_count = common_ring_get_count, > }; > > +/* ops for mempool with ring in MT_RTS sync mode */ > +static const struct rte_mempool_ops ops_mt_rts = { > + .name = "ring_mt_rts", > + .alloc = rts_ring_alloc, > + .free = common_ring_free, > + .enqueue = rts_ring_mp_enqueue, > + .dequeue = rts_ring_mc_dequeue, > + .get_count = common_ring_get_count, > +}; > + > +/* ops for mempool with ring in MT_HTS sync mode */ > +static const struct rte_mempool_ops ops_mt_hts = { > + .name = "ring_mt_hts", > + .alloc = hts_ring_alloc, > + .free = common_ring_free, > + .enqueue = hts_ring_mp_enqueue, > + .dequeue = hts_ring_mc_dequeue, > + .get_count = common_ring_get_count, > +}; > + > MEMPOOL_REGISTER_OPS(ops_mp_mc); > MEMPOOL_REGISTER_OPS(ops_sp_sc); > MEMPOOL_REGISTER_OPS(ops_mp_sc); 
> MEMPOOL_REGISTER_OPS(ops_sp_mc); > +MEMPOOL_REGISTER_OPS(ops_mt_rts); > +MEMPOOL_REGISTER_OPS(ops_mt_hts); Not really related to your patch, but I think we need a function to dump the names of available mempool ops. We could even add a description. The problem we have is that a user does not know on which criteria it should use a driver or another (except for platform drivers). Olivier
Hi Olivier, > Hi Konstantin, > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote: > > v2: > > - update Release Notes (as per comments) > > > > Two new sync modes were introduced into rte_ring: > > relaxed tail sync (RTS) and head/tail sync (HTS). > > This change provides user with ability to select these > > modes for ring based mempool via mempool ops API. > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > > Acked-by: Gage Eads <gage.eads@intel.com> > > --- > > doc/guides/rel_notes/release_20_08.rst | 6 ++ > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- > > 2 files changed, 94 insertions(+), 9 deletions(-) > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst > > index eaaf11c37..7bdcf3aac 100644 > > --- a/doc/guides/rel_notes/release_20_08.rst > > +++ b/doc/guides/rel_notes/release_20_08.rst > > @@ -84,6 +84,12 @@ New Features > > * Dump ``rte_flow`` memory consumption. > > * Measure packet per second forwarding. > > > > +* **Added support for new sync modes into mempool ring driver.** > > + > > + Added ability to select new ring synchronisation modes: > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > > + via mempool ops API. > > + > > > > Removed Items > > ------------- > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c > > index bc123fc52..15ec7dee7 100644 > > --- a/drivers/mempool/ring/rte_mempool_ring.c > > +++ b/drivers/mempool/ring/rte_mempool_ring.c > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > } > > > > +static int > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > + unsigned int n) > > +{ > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, > > + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > > +} > > + > > +static int > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > + unsigned int n) > > +{ > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > +} > > + > > static int > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > { > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > } > > > > +static int > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > +{ > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > +} > > + > > +static int > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > +{ > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > +} > > + > > static unsigned > > common_ring_get_count(const struct rte_mempool *mp) > > { > > return rte_ring_count(mp->pool_data); > > } > > > > - > > static int > > -common_ring_alloc(struct rte_mempool *mp) > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) > > { > > - int rg_flags = 0, ret; > > + int ret; > > char rg_name[RTE_RING_NAMESIZE]; > > struct rte_ring *r; > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) > > return -rte_errno; > > } > > > > - /* ring flags */ > > - if (mp->flags & MEMPOOL_F_SP_PUT) > > - rg_flags |= RING_F_SP_ENQ; > > - if (mp->flags & MEMPOOL_F_SC_GET) > > - rg_flags |= RING_F_SC_DEQ; > > - > > /* > > * Allocate the ring that will be used to store objects. 
> > * Ring functions will return appropriate errors if we are > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) > > return 0; > > } > > > > +static int > > +common_ring_alloc(struct rte_mempool *mp) > > +{ > > + uint32_t rg_flags; > > + > > + rg_flags = 0; > > Maybe it could go on the same line > > > + > > + /* ring flags */ > > Not sure we need to keep this comment > > > + if (mp->flags & MEMPOOL_F_SP_PUT) > > + rg_flags |= RING_F_SP_ENQ; > > + if (mp->flags & MEMPOOL_F_SC_GET) > > + rg_flags |= RING_F_SC_DEQ; > > + > > + return ring_alloc(mp, rg_flags); > > +} > > + > > +static int > > +rts_ring_alloc(struct rte_mempool *mp) > > +{ > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > + return -EINVAL; > > Why do we need this? It is a problem to allow sc/sp in this mode (even > if it's not optimal)? These new sync modes (RTS, HTS) are for MT. For SP/SC - there is simply no point to use MT sync modes. I suppose there are a few choices: 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour and create actual ring with ST sync mode for prod/cons. 2. Report an error. 3. Silently ignore these flags. As I can see for "ring_mp_mc" ops, we are doing #1, while for "stack" we are doing #3. For RTS/HTS I chose #2, as it seems cleaner to me. Any thoughts from your side what the preferable behaviour should be? 
> > + > > + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); > > +} > > + > > +static int > > +hts_ring_alloc(struct rte_mempool *mp) > > +{ > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > + return -EINVAL; > > + > > + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); > > +} > > + > > static void > > common_ring_free(struct rte_mempool *mp) > > { > > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = { > > .get_count = common_ring_get_count, > > }; > > > > +/* ops for mempool with ring in MT_RTS sync mode */ > > +static const struct rte_mempool_ops ops_mt_rts = { > > + .name = "ring_mt_rts", > > + .alloc = rts_ring_alloc, > > + .free = common_ring_free, > > + .enqueue = rts_ring_mp_enqueue, > > + .dequeue = rts_ring_mc_dequeue, > > + .get_count = common_ring_get_count, > > +}; > > + > > +/* ops for mempool with ring in MT_HTS sync mode */ > > +static const struct rte_mempool_ops ops_mt_hts = { > > + .name = "ring_mt_hts", > > + .alloc = hts_ring_alloc, > > + .free = common_ring_free, > > + .enqueue = hts_ring_mp_enqueue, > > + .dequeue = hts_ring_mc_dequeue, > > + .get_count = common_ring_get_count, > > +}; > > + > > MEMPOOL_REGISTER_OPS(ops_mp_mc); > > MEMPOOL_REGISTER_OPS(ops_sp_sc); > > MEMPOOL_REGISTER_OPS(ops_mp_sc); > > MEMPOOL_REGISTER_OPS(ops_sp_mc); > > +MEMPOOL_REGISTER_OPS(ops_mt_rts); > > +MEMPOOL_REGISTER_OPS(ops_mt_hts); > Not really related to your patch, but I think we need a function to > dump the name of available mempool ops. We could even add a description. > The problem we have is that a user does not know on which criteria is > should use a driver or another (except for platform drivers). Agree, it will be useful. Though it is probably a subject for a separate patch.
Hi Konstantin, On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote: > Hi Olivier, > > > Hi Konstantin, > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote: > > > v2: > > > - update Release Notes (as per comments) > > > > > > Two new sync modes were introduced into rte_ring: > > > relaxed tail sync (RTS) and head/tail sync (HTS). > > > This change provides user with ability to select these > > > modes for ring based mempool via mempool ops API. > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > > > Acked-by: Gage Eads <gage.eads@intel.com> > > > --- > > > doc/guides/rel_notes/release_20_08.rst | 6 ++ > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- > > > 2 files changed, 94 insertions(+), 9 deletions(-) > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst > > > index eaaf11c37..7bdcf3aac 100644 > > > --- a/doc/guides/rel_notes/release_20_08.rst > > > +++ b/doc/guides/rel_notes/release_20_08.rst > > > @@ -84,6 +84,12 @@ New Features > > > * Dump ``rte_flow`` memory consumption. > > > * Measure packet per second forwarding. > > > > > > +* **Added support for new sync modes into mempool ring driver.** > > > + > > > + Added ability to select new ring synchronisation modes: > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > > > + via mempool ops API. > > > + > > > > > > Removed Items > > > ------------- > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c > > > index bc123fc52..15ec7dee7 100644 > > > --- a/drivers/mempool/ring/rte_mempool_ring.c > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > > > } > > > > > > +static int > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > + unsigned int n) > > > +{ > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > +} > > > + > > > +static int > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > + unsigned int n) > > > +{ > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > +} > > > + > > > static int > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > { > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > } > > > > > > +static int > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > +{ > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > +} > > > + > > > +static int > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > +{ > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, > > > + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > > > +} > > > + > > > static unsigned > > > common_ring_get_count(const struct rte_mempool *mp) > > > { > > > return rte_ring_count(mp->pool_data); > > > } > > > > > > - > > > static int > > > -common_ring_alloc(struct rte_mempool *mp) > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) > > > { > > > - int rg_flags = 0, ret; > > > + int ret; > > > char rg_name[RTE_RING_NAMESIZE]; > > > struct rte_ring *r; > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) > > > return -rte_errno; > > > } > > > > > > - /* ring flags */ > > > - if (mp->flags & MEMPOOL_F_SP_PUT) > > > - rg_flags |= RING_F_SP_ENQ; > > > - if (mp->flags & MEMPOOL_F_SC_GET) > > > - rg_flags |= RING_F_SC_DEQ; > > > - > > > /* > > > * Allocate the ring that will be used to store objects. > > > * Ring functions will return appropriate errors if we are > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) > > > return 0; > > > } > > > > > > +static int > > > +common_ring_alloc(struct rte_mempool *mp) > > > +{ > > > + uint32_t rg_flags; > > > + > > > + rg_flags = 0; > > > > Maybe it could go on the same line > > > > > + > > > + /* ring flags */ > > > > Not sure we need to keep this comment > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT) > > > + rg_flags |= RING_F_SP_ENQ; > > > + if (mp->flags & MEMPOOL_F_SC_GET) > > > + rg_flags |= RING_F_SC_DEQ; > > > + > > > + return ring_alloc(mp, rg_flags); > > > +} > > > + > > > +static int > > > +rts_ring_alloc(struct rte_mempool *mp) > > > +{ > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > > + return -EINVAL; > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even > > if it's not optimal)? > > These new sync modes (RTS, HTS) are for MT. > For SP/SC - there is simply no point to use MT sync modes. > I suppose there are few choices: > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour > and create actual ring with ST sync mode for prod/cons. 
> 2. Report an error. > 3. Silently ignore these flags. > > As I can see for "ring_mp_mc" ops, we doing #1, > while for "stack" we are doing #3. > For RTS/HTS I chosoe #2, as it seems cleaner to me. > Any thoughts from your side what preferable behaviour should be? The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc, ring_mp_mc). I don't think we should look at it when using specific ops. So I'll tend to say 3. is the correct thing to do. > > > > > > + > > > + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); > > > +} > > > + > > > +static int > > > +hts_ring_alloc(struct rte_mempool *mp) > > > +{ > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > > + return -EINVAL; > > > + > > > + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); > > > +} > > > + > > > static void > > > common_ring_free(struct rte_mempool *mp) > > > { > > > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = { > > > .get_count = common_ring_get_count, > > > }; > > > > > > +/* ops for mempool with ring in MT_RTS sync mode */ > > > +static const struct rte_mempool_ops ops_mt_rts = { > > > + .name = "ring_mt_rts", > > > + .alloc = rts_ring_alloc, > > > + .free = common_ring_free, > > > + .enqueue = rts_ring_mp_enqueue, > > > + .dequeue = rts_ring_mc_dequeue, > > > + .get_count = common_ring_get_count, > > > +}; > > > + > > > +/* ops for mempool with ring in MT_HTS sync mode */ > > > +static const struct rte_mempool_ops ops_mt_hts = { > > > + .name = "ring_mt_hts", > > > + .alloc = hts_ring_alloc, > > > + .free = common_ring_free, > > > + .enqueue = hts_ring_mp_enqueue, > > > + .dequeue = hts_ring_mc_dequeue, > > > + .get_count = common_ring_get_count, > > > +}; > > > + > > > MEMPOOL_REGISTER_OPS(ops_mp_mc); > > > MEMPOOL_REGISTER_OPS(ops_sp_sc); > > > MEMPOOL_REGISTER_OPS(ops_mp_sc); > > > MEMPOOL_REGISTER_OPS(ops_sp_mc); > > > +MEMPOOL_REGISTER_OPS(ops_mt_rts); 
> > > +MEMPOOL_REGISTER_OPS(ops_mt_hts); > > > Not really related to your patch, but I think we need a function to > > dump the name of available mempool ops. We could even add a description. > > The problem we have is that a user does not know on which criteria is > > should use a driver or another (except for platform drivers). > > Agree, it will be usefull. > Though it probably subject for a separate patch. >
Hi Olivier, > Hi Konstantin, > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote: > > Hi Olivier, > > > > > Hi Konstantin, > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote: > > > > v2: > > > > - update Release Notes (as per comments) > > > > > > > > Two new sync modes were introduced into rte_ring: > > > > relaxed tail sync (RTS) and head/tail sync (HTS). > > > > This change provides user with ability to select these > > > > modes for ring based mempool via mempool ops API. > > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > > > > Acked-by: Gage Eads <gage.eads@intel.com> > > > > --- > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++ > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- > > > > 2 files changed, 94 insertions(+), 9 deletions(-) > > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst > > > > index eaaf11c37..7bdcf3aac 100644 > > > > --- a/doc/guides/rel_notes/release_20_08.rst > > > > +++ b/doc/guides/rel_notes/release_20_08.rst > > > > @@ -84,6 +84,12 @@ New Features > > > > * Dump ``rte_flow`` memory consumption. > > > > * Measure packet per second forwarding. > > > > > > > > +* **Added support for new sync modes into mempool ring driver.** > > > > + > > > > + Added ability to select new ring synchronisation modes: > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > > > > + via mempool ops API. > > > > + > > > > > > > > Removed Items > > > > ------------- > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c > > > > index bc123fc52..15ec7dee7 100644 > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > > > > } > > > > > > > > +static int > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > + unsigned int n) > > > > +{ > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > +} > > > > + > > > > +static int > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > + unsigned int n) > > > > +{ > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > +} > > > > + > > > > static int > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > > { > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > } > > > > > > > > +static int > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > > +{ > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > +} > > > > + > > > > +static int > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > > +{ > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, > > > > + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > > > > +} > > > > + > > > > static unsigned > > > > common_ring_get_count(const struct rte_mempool *mp) > > > > { > > > > return rte_ring_count(mp->pool_data); > > > > } > > > > > > > > - > > > > static int > > > > -common_ring_alloc(struct rte_mempool *mp) > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) > > > > { > > > > - int rg_flags = 0, ret; > > > > + int ret; > > > > char rg_name[RTE_RING_NAMESIZE]; > > > > struct rte_ring *r; > > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) > > > > return -rte_errno; > > > > } > > > > > > > > - /* ring flags */ > > > > - if (mp->flags & MEMPOOL_F_SP_PUT) > > > > - rg_flags |= RING_F_SP_ENQ; > > > > - if (mp->flags & MEMPOOL_F_SC_GET) > > > > - rg_flags |= RING_F_SC_DEQ; > > > > - > > > > /* > > > > * Allocate the ring that will be used to store objects. > > > > * Ring functions will return appropriate errors if we are > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) > > > > return 0; > > > > } > > > > > > > > +static int > > > > +common_ring_alloc(struct rte_mempool *mp) > > > > +{ > > > > + uint32_t rg_flags; > > > > + > > > > + rg_flags = 0; > > > > > > Maybe it could go on the same line > > > > > > > + > > > > + /* ring flags */ > > > > > > Not sure we need to keep this comment > > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT) > > > > + rg_flags |= RING_F_SP_ENQ; > > > > + if (mp->flags & MEMPOOL_F_SC_GET) > > > > + rg_flags |= RING_F_SC_DEQ; > > > > + > > > > + return ring_alloc(mp, rg_flags); > > > > +} > > > > + > > > > +static int > > > > +rts_ring_alloc(struct rte_mempool *mp) > > > > +{ > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > > > + return -EINVAL; > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even > > > if it's not optimal)? > > > > These new sync modes (RTS, HTS) are for MT. > > For SP/SC - there is simply no point to use MT sync modes. 
> > I suppose there are few choices: > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour > > and create actual ring with ST sync mode for prod/cons. > > 2. Report an error. > > 3. Silently ignore these flags. > > > > As I can see for "ring_mp_mc" ops, we doing #1, > > while for "stack" we are doing #3. > > For RTS/HTS I chosoe #2, as it seems cleaner to me. > > Any thoughts from your side what preferable behaviour should be? > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc, > ring_mp_mc). As I understand, nothing prevents user from doing: mp = rte_mempool_create_empty(name, n, elt_size, cache_size, sizeof(struct rte_pktmbuf_pool_private), socket_id, 0); >I don't think we should look at it when using specific ops. > > So I'll tend to say 3. is the correct thing to do. Ok, will resend v3 then. > > > > > > > > > > + > > > > + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); > > > > +} > > > > + > > > > +static int > > > > +hts_ring_alloc(struct rte_mempool *mp) > > > > +{ > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > > > + return -EINVAL; > > > > + > > > > + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); > > > > +} > > > > + > > > > static void > > > > common_ring_free(struct rte_mempool *mp) > > > > { > > > > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = { > > > > .get_count = common_ring_get_count, > > > > }; > > > > > > > > +/* ops for mempool with ring in MT_RTS sync mode */ > > > > +static const struct rte_mempool_ops ops_mt_rts = { > > > > + .name = "ring_mt_rts", > > > > + .alloc = rts_ring_alloc, > > > > + .free = common_ring_free, > > > > + .enqueue = rts_ring_mp_enqueue, > > > > + .dequeue = rts_ring_mc_dequeue, > > > > + .get_count = common_ring_get_count, > > > > +}; > > > > + > > > > +/* ops for mempool with ring in MT_HTS sync mode */ > > > > +static const struct 
rte_mempool_ops ops_mt_hts = { > > > > + .name = "ring_mt_hts", > > > > + .alloc = hts_ring_alloc, > > > > + .free = common_ring_free, > > > > + .enqueue = hts_ring_mp_enqueue, > > > > + .dequeue = hts_ring_mc_dequeue, > > > > + .get_count = common_ring_get_count, > > > > +}; > > > > + > > > > MEMPOOL_REGISTER_OPS(ops_mp_mc); > > > > MEMPOOL_REGISTER_OPS(ops_sp_sc); > > > > MEMPOOL_REGISTER_OPS(ops_mp_sc); > > > > MEMPOOL_REGISTER_OPS(ops_sp_mc); > > > > +MEMPOOL_REGISTER_OPS(ops_mt_rts); > > > > +MEMPOOL_REGISTER_OPS(ops_mt_hts); > > > Not really related to your patch, but I think we need a function to > > > dump the name of available mempool ops. We could even add a description. > > > The problem we have is that a user does not know on which criteria it > > > should use a driver or another (except for platform drivers). > > > > Agree, it will be useful. > > Though it is probably subject for a separate patch. > >
> > Hi Olivier, > > > Hi Konstantin, > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote: > > > Hi Olivier, > > > > > > > Hi Konstantin, > > > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote: > > > > > v2: > > > > > - update Release Notes (as per comments) > > > > > > > > > > Two new sync modes were introduced into rte_ring: > > > > > relaxed tail sync (RTS) and head/tail sync (HTS). > > > > > This change provides user with ability to select these > > > > > modes for ring based mempool via mempool ops API. > > > > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > > > > > Acked-by: Gage Eads <gage.eads@intel.com> > > > > > --- > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++ > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- > > > > > 2 files changed, 94 insertions(+), 9 deletions(-) > > > > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst > > > > > index eaaf11c37..7bdcf3aac 100644 > > > > > --- a/doc/guides/rel_notes/release_20_08.rst > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst > > > > > @@ -84,6 +84,12 @@ New Features > > > > > * Dump ``rte_flow`` memory consumption. > > > > > * Measure packet per second forwarding. > > > > > > > > > > +* **Added support for new sync modes into mempool ring driver.** > > > > > + > > > > > + Added ability to select new ring synchronisation modes: > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > > > > > + via mempool ops API. 
> > > > > + > > > > > > > > > > Removed Items > > > > > ------------- > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c > > > > > index bc123fc52..15ec7dee7 100644 > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > } > > > > > > > > > > +static int > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > + unsigned int n) > > > > > +{ > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > +} > > > > > + > > > > > +static int > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > + unsigned int n) > > > > > +{ > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > +} > > > > > + > > > > > static int > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > > > { > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > } > > > > > > > > > > +static int > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > > > +{ > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > +} > > > > > + > > > > > +static int > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > > > +{ > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, > > > > > + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > > > > > +} > > > > > + > > > > > static unsigned > > > > > common_ring_get_count(const struct rte_mempool *mp) > > > > > { > > > > > return rte_ring_count(mp->pool_data); > > > > > } > > > > > > > > > > - > > > > > static int > > > > > -common_ring_alloc(struct rte_mempool *mp) > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) > > > > > { > > > > > - int rg_flags = 0, ret; > > > > > + int ret; > > > > > char rg_name[RTE_RING_NAMESIZE]; > > > > > struct rte_ring *r; > > > > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) > > > > > return -rte_errno; > > > > > } > > > > > > > > > > - /* ring flags */ > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT) > > > > > - rg_flags |= RING_F_SP_ENQ; > > > > > - if (mp->flags & MEMPOOL_F_SC_GET) > > > > > - rg_flags |= RING_F_SC_DEQ; > > > > > - > > > > > /* > > > > > * Allocate the ring that will be used to store objects. > > > > > * Ring functions will return appropriate errors if we are > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) > > > > > return 0; > > > > > } > > > > > > > > > > +static int > > > > > +common_ring_alloc(struct rte_mempool *mp) > > > > > +{ > > > > > + uint32_t rg_flags; > > > > > + > > > > > + rg_flags = 0; > > > > > > > > Maybe it could go on the same line > > > > > > > > > + > > > > > + /* ring flags */ > > > > > > > > Not sure we need to keep this comment > > > > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT) > > > > > + rg_flags |= RING_F_SP_ENQ; > > > > > + if (mp->flags & MEMPOOL_F_SC_GET) > > > > > + rg_flags |= RING_F_SC_DEQ; > > > > > + > > > > > + return ring_alloc(mp, rg_flags); > > > > > +} > > > > > + > > > > > +static int > > > > > +rts_ring_alloc(struct rte_mempool *mp) > > > > > +{ > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > > > > + return -EINVAL; > > > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even > > > > if it's not optimal)? 
> > > > > > These new sync modes (RTS, HTS) are for MT. > > > For SP/SC - there is simply no point to use MT sync modes. > > > I suppose there are few choices: > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour > > > and create actual ring with ST sync mode for prod/cons. > > > 2. Report an error. > > > 3. Silently ignore these flags. > > > > > > As I can see for "ring_mp_mc" ops, we doing #1, > > > while for "stack" we are doing #3. > > > For RTS/HTS I chose #2, as it seems cleaner to me. > > > Any thoughts from your side what preferable behaviour should be? > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc, > > ring_mp_mc). > > As I understand, nothing prevents user from doing: > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size, > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0); Apologies, hit send accidentally. > I meant user can do: > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET); > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); > > And in that case, he'll get SP/SC ring underneath. It looks it's not the case. Since commit 449c49b93a6b ("mempool: support handler operations"), the flags SP_PUT/SC_GET are converted into a call to rte_mempool_set_ops_byname() in rte_mempool_create() only. In rte_mempool_create_empty(), these flags are ignored. It is expected that the user calls rte_mempool_set_ops_byname() by itself. I don't think it is a good behavior: 1/ The documentation of rte_mempool_create_empty() does not say that the flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET sets the default ops like rte_mempool_create(). 
Two new sync modes were introduced into rte_ring: relaxed tail sync (RTS) and head/tail sync (HTS). This change provides user with ability to select these modes for ring based mempool via mempool ops API. Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> Acked-by: Gage Eads <gage.eads@intel.com> --- v3: silently ignore F_SP_PUT/F_SC_GET (Olivier comment) v2: update Release Notes (as per comments) doc/guides/rel_notes/release_20_08.rst | 6 ++ drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++--- 2 files changed, 85 insertions(+), 9 deletions(-) diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst index 17d70e7c1..fff819fca 100644 --- a/doc/guides/rel_notes/release_20_08.rst +++ b/doc/guides/rel_notes/release_20_08.rst @@ -171,6 +171,12 @@ New Features See the :doc:`../sample_app_ug/l2_forward_real_virtual` for more details of this parameter usage. +* **Added support for new sync modes into mempool ring driver.** + + Added ability to select new ring synchronisation modes: + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` + via mempool ops API. + Removed Items ------------- diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c index bc123fc52..b1f09ff28 100644 --- a/drivers/mempool/ring/rte_mempool_ring.c +++ b/drivers/mempool/ring/rte_mempool_ring.c @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; +} + static int common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) { @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + static unsigned common_ring_get_count(const struct rte_mempool *mp) { return rte_ring_count(mp->pool_data); } - static int -common_ring_alloc(struct rte_mempool *mp) +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) { - int rg_flags = 0, ret; + int ret; char rg_name[RTE_RING_NAMESIZE]; struct rte_ring *r; @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) return -rte_errno; } - /* ring flags */ - if (mp->flags & MEMPOOL_F_SP_PUT) - rg_flags |= RING_F_SP_ENQ; - if (mp->flags & MEMPOOL_F_SC_GET) - rg_flags |= RING_F_SC_DEQ; - /* * Allocate the ring that will be used to store objects. 
* Ring functions will return appropriate errors if we are @@ -82,6 +105,31 @@ common_ring_alloc(struct rte_mempool *mp) return 0; } +static int +common_ring_alloc(struct rte_mempool *mp) +{ + uint32_t rg_flags = 0; + + if (mp->flags & MEMPOOL_F_SP_PUT) + rg_flags |= RING_F_SP_ENQ; + if (mp->flags & MEMPOOL_F_SC_GET) + rg_flags |= RING_F_SC_DEQ; + + return ring_alloc(mp, rg_flags); +} + +static int +rts_ring_alloc(struct rte_mempool *mp) +{ + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); +} + +static int +hts_ring_alloc(struct rte_mempool *mp) +{ + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); +} + static void common_ring_free(struct rte_mempool *mp) { @@ -130,7 +178,29 @@ static const struct rte_mempool_ops ops_sp_mc = { .get_count = common_ring_get_count, }; +/* ops for mempool with ring in MT_RTS sync mode */ +static const struct rte_mempool_ops ops_mt_rts = { + .name = "ring_mt_rts", + .alloc = rts_ring_alloc, + .free = common_ring_free, + .enqueue = rts_ring_mp_enqueue, + .dequeue = rts_ring_mc_dequeue, + .get_count = common_ring_get_count, +}; + +/* ops for mempool with ring in MT_HTS sync mode */ +static const struct rte_mempool_ops ops_mt_hts = { + .name = "ring_mt_hts", + .alloc = hts_ring_alloc, + .free = common_ring_free, + .enqueue = hts_ring_mp_enqueue, + .dequeue = hts_ring_mc_dequeue, + .get_count = common_ring_get_count, +}; + MEMPOOL_REGISTER_OPS(ops_mp_mc); MEMPOOL_REGISTER_OPS(ops_sp_sc); MEMPOOL_REGISTER_OPS(ops_mp_sc); MEMPOOL_REGISTER_OPS(ops_sp_mc); +MEMPOOL_REGISTER_OPS(ops_mt_rts); +MEMPOOL_REGISTER_OPS(ops_mt_hts); -- 2.17.1
10/07/2020 18:21, Konstantin Ananyev: > Two new sync modes were introduced into rte_ring: > relaxed tail sync (RTS) and head/tail sync (HTS). > This change provides user with ability to select these > modes for ring based mempool via mempool ops API. No, the user cannot use these modes because there is no doc, except in release notes. Sorry I don't understand why I have to do such obvious comment after so many years. And more importantly, why nobody else asked for doc? I think I will add "UNMAINTAINED" for doc in MAINTAINERS file. If nobody is interested in doc maintenance, we could "git rm doc/". Sorry in advance for hijacking this thread. [...] > --- a/doc/guides/rel_notes/release_20_08.rst > +++ b/doc/guides/rel_notes/release_20_08.rst > @@ -171,6 +171,12 @@ New Features > See the :doc:`../sample_app_ug/l2_forward_real_virtual` for more > details of this parameter usage. > > +* **Added support for new sync modes into mempool ring driver.** > + > + Added ability to select new ring synchronisation modes: > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > + via mempool ops API. Please check the comment on top of new features about the ordering. Mempool features should be mentioned earlier in the list.
> 10/07/2020 18:21, Konstantin Ananyev: > > Two new sync modes were introduced into rte_ring: > > relaxed tail sync (RTS) and head/tail sync (HTS). > > This change provides user with ability to select these > > modes for ring based mempool via mempool ops API. > > No, the user cannot use these modes because there is no doc, > except in release notes. AFAIK, there is no section for rte_ring and others SW based mempool drivers: https://doc.dpdk.org/guides/mempool/index.html Are you asking me to add such section as part of this patch? > > Sorry I don't understand why I have to do such obvious comment > after so many years. > And more importantly, why nobody else asked for doc? > > I think I will add "UNMAINTAINED" for doc in MAINTAINERS file. > If nobody is interested in doc maintenance, we could "git rm doc/". > > Sorry in advance for hijacking this thread. > > [...] > > --- a/doc/guides/rel_notes/release_20_08.rst > > +++ b/doc/guides/rel_notes/release_20_08.rst > > @@ -171,6 +171,12 @@ New Features > > See the :doc:`../sample_app_ug/l2_forward_real_virtual` for more > > details of this parameter usage. > > > > +* **Added support for new sync modes into mempool ring driver.** > > + > > + Added ability to select new ring synchronisation modes: > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > > + via mempool ops API. > > Please check the comment on top of new features about the ordering. > Mempool features should be mentioned earlier in the list. > >
Hi Konstantin, On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote: > > > > > > Hi Olivier, > > > > > Hi Konstantin, > > > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote: > > > > Hi Olivier, > > > > > > > > > Hi Konstantin, > > > > > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote: > > > > > > v2: > > > > > > - update Release Notes (as per comments) > > > > > > > > > > > > Two new sync modes were introduced into rte_ring: > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS). > > > > > > This change provides user with ability to select these > > > > > > modes for ring based mempool via mempool ops API. > > > > > > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > > > > > > Acked-by: Gage Eads <gage.eads@intel.com> > > > > > > --- > > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++ > > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- > > > > > > 2 files changed, 94 insertions(+), 9 deletions(-) > > > > > > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst > > > > > > index eaaf11c37..7bdcf3aac 100644 > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst > > > > > > @@ -84,6 +84,12 @@ New Features > > > > > > * Dump ``rte_flow`` memory consumption. > > > > > > * Measure packet per second forwarding. > > > > > > > > > > > > +* **Added support for new sync modes into mempool ring driver.** > > > > > > + > > > > > > + Added ability to select new ring synchronisation modes: > > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > > > > > > + via mempool ops API. 
> > > > > > + > > > > > > > > > > > > Removed Items > > > > > > ------------- > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c > > > > > > index bc123fc52..15ec7dee7 100644 > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > } > > > > > > > > > > > > +static int > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > > + unsigned int n) > > > > > > +{ > > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > +} > > > > > > + > > > > > > +static int > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > > + unsigned int n) > > > > > > +{ > > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > +} > > > > > > + > > > > > > static int > > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > > > > { > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > } > > > > > > > > > > > > +static int > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > > > > +{ > > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > +} > > > > > > + > > > > > > +static int > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > > > > +{ > > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, > > > > > > + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > > > > > > +} > > > > > > + > > > > > > static unsigned > > > > > > common_ring_get_count(const struct rte_mempool *mp) > > > > > > { > > > > > > return rte_ring_count(mp->pool_data); > > > > > > } > > > > > > > > > > > > - > > > > > > static int > > > > > > -common_ring_alloc(struct rte_mempool *mp) > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) > > > > > > { > > > > > > - int rg_flags = 0, ret; > > > > > > + int ret; > > > > > > char rg_name[RTE_RING_NAMESIZE]; > > > > > > struct rte_ring *r; > > > > > > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) > > > > > > return -rte_errno; > > > > > > } > > > > > > > > > > > > - /* ring flags */ > > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT) > > > > > > - rg_flags |= RING_F_SP_ENQ; > > > > > > - if (mp->flags & MEMPOOL_F_SC_GET) > > > > > > - rg_flags |= RING_F_SC_DEQ; > > > > > > - > > > > > > /* > > > > > > * Allocate the ring that will be used to store objects. > > > > > > * Ring functions will return appropriate errors if we are > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) > > > > > > return 0; > > > > > > } > > > > > > > > > > > > +static int > > > > > > +common_ring_alloc(struct rte_mempool *mp) > > > > > > +{ > > > > > > + uint32_t rg_flags; > > > > > > + > > > > > > + rg_flags = 0; > > > > > > > > > > Maybe it could go on the same line > > > > > > > > > > > + > > > > > > + /* ring flags */ > > > > > > > > > > Not sure we need to keep this comment > > > > > > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT) > > > > > > + rg_flags |= RING_F_SP_ENQ; > > > > > > + if (mp->flags & MEMPOOL_F_SC_GET) > > > > > > + rg_flags |= RING_F_SC_DEQ; > > > > > > + > > > > > > + return ring_alloc(mp, rg_flags); > > > > > > +} > > > > > > + > > > > > > +static int > > > > > > +rts_ring_alloc(struct rte_mempool *mp) > > > > > > +{ > > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > > > > > + return -EINVAL; > > > > > > 
> > > > Why do we need this? It is a problem to allow sc/sp in this mode (even > > > > > if it's not optimal)? > > > > > > > > These new sync modes (RTS, HTS) are for MT. > > > > For SP/SC - there is simply no point to use MT sync modes. > > > > I suppose there are few choices: > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour > > > > and create actual ring with ST sync mode for prod/cons. > > > > 2. Report an error. > > > > 3. Silently ignore these flags. > > > > > > > > As I can see for "ring_mp_mc" ops, we doing #1, > > > > while for "stack" we are doing #3. > > > > For RTS/HTS I chosoe #2, as it seems cleaner to me. > > > > Any thoughts from your side what preferable behaviour should be? > > > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc, > > > ring_mp_mc). > > > > As I understand, nothing prevents user from doing: > > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size, > > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0); > > Apologies, hit send accidently. > I meant user can do: > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET); > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); > > An in that case, he'll get SP/SC ring underneath. It looks it's not the case. Since commit 449c49b93a6b ("mempool: support handler operations"), the flags SP_PUT/SC_GET are converted into a call to rte_mempool_set_ops_byname() in rte_mempool_create() only. In rte_mempool_create_empty(), these flags are ignored. It is expected that the user calls rte_mempool_set_ops_byname() by itself. I don't think it is a good behavior: 1/ The documentation of rte_mempool_create_empty() does not say that the flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET sets the default ops like rte_mempool_create(). 
2/ If rte_mempool_set_ops_byname() is not called after rte_mempool_create_empty() (and it looks it happens in dpdk's code), the default ops are the ones registered at index 0. This depends on the link order. So I propose to move the following code in rte_mempool_create_empty(). if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET)) ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); else if (flags & MEMPOOL_F_SP_PUT) ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); else if (flags & MEMPOOL_F_SC_GET) ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); else ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); What do you think? > > > > > > > >I don't think we should look at it when using specific ops. > > > > > > So I'll tend to say 3. is the correct thing to do. > > > > Ok, will resend v3 then. > >
13/07/2020 14:58, Ananyev, Konstantin: > > 10/07/2020 18:21, Konstantin Ananyev: > > > Two new sync modes were introduced into rte_ring: > > > relaxed tail sync (RTS) and head/tail sync (HTS). > > > This change provides user with ability to select these > > > modes for ring based mempool via mempool ops API. > > > > No, the user cannot use these modes because there is no doc, > > except in release notes. > > AFAIK, there is no section for rte_ring and others SW based > mempool drivers: > https://doc.dpdk.org/guides/mempool/index.html > Are you asking me to add such section as part of this patch? I'm asking to provide some documentation to the users, yes. It can be in the doc you pointed above with a link from the prog guide: https://doc.dpdk.org/guides/prog_guide/mempool_lib.html#mempool-handlers And because it is solving issues in some use cases, it needs to be called out why and when to use these options.
Hi Olivier, > Hi Konstantin, > > On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote: > > > > > > > > > > Hi Olivier, > > > > > > > Hi Konstantin, > > > > > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote: > > > > > Hi Olivier, > > > > > > > > > > > Hi Konstantin, > > > > > > > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote: > > > > > > > v2: > > > > > > > - update Release Notes (as per comments) > > > > > > > > > > > > > > Two new sync modes were introduced into rte_ring: > > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS). > > > > > > > This change provides user with ability to select these > > > > > > > modes for ring based mempool via mempool ops API. > > > > > > > > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > > > > > > > Acked-by: Gage Eads <gage.eads@intel.com> > > > > > > > --- > > > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++ > > > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++--- > > > > > > > 2 files changed, 94 insertions(+), 9 deletions(-) > > > > > > > > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst > > > > > > > index eaaf11c37..7bdcf3aac 100644 > > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst > > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst > > > > > > > @@ -84,6 +84,12 @@ New Features > > > > > > > * Dump ``rte_flow`` memory consumption. > > > > > > > * Measure packet per second forwarding. > > > > > > > > > > > > > > +* **Added support for new sync modes into mempool ring driver.** > > > > > > > + > > > > > > > + Added ability to select new ring synchronisation modes: > > > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` > > > > > > > + via mempool ops API. 
> > > > > > > + > > > > > > > > > > > > > > Removed Items > > > > > > > ------------- > > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c > > > > > > > index bc123fc52..15ec7dee7 100644 > > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c > > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c > > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > > } > > > > > > > > > > > > > > +static int > > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > > > + unsigned int n) > > > > > > > +{ > > > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > > +} > > > > > > > + > > > > > > > +static int > > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, > > > > > > > + unsigned int n) > > > > > > > +{ > > > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > > +} > > > > > > > + > > > > > > > static int > > > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > > > > > { > > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > > } > > > > > > > > > > > > > > +static int > > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > > > > > +{ > > > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, > > > > > > > + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; > > > > > > > +} > > > > > > > + > > > > > > > +static int > > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) > > > > > > > +{ > > > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; > > > > > > > +} > > > > > > > + > > > > > > > static unsigned > > > > > > > common_ring_get_count(const struct rte_mempool *mp) > > > > > > > { > > > > > > > return rte_ring_count(mp->pool_data); > > > > > > > } > > > > > > > > > > > > > > - > > > > > > > static int > > > > > > > -common_ring_alloc(struct rte_mempool *mp) > > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) > > > > > > > { > > > > > > > - int rg_flags = 0, ret; > > > > > > > + int ret; > > > > > > > char rg_name[RTE_RING_NAMESIZE]; > > > > > > > struct rte_ring *r; > > > > > > > > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) > > > > > > > return -rte_errno; > > > > > > > } > > > > > > > > > > > > > > - /* ring flags */ > > > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT) > > > > > > > - rg_flags |= RING_F_SP_ENQ; > > > > > > > - if (mp->flags & MEMPOOL_F_SC_GET) > > > > > > > - rg_flags |= RING_F_SC_DEQ; > > > > > > > - > > > > > > > /* > > > > > > > * Allocate the ring that will be used to store objects. 
> > > > > > > * Ring functions will return appropriate errors if we are > > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp) > > > > > > > return 0; > > > > > > > } > > > > > > > > > > > > > > +static int > > > > > > > +common_ring_alloc(struct rte_mempool *mp) > > > > > > > +{ > > > > > > > + uint32_t rg_flags; > > > > > > > + > > > > > > > + rg_flags = 0; > > > > > > > > > > > > Maybe it could go on the same line > > > > > > > > > > > > > + > > > > > > > + /* ring flags */ > > > > > > > > > > > > Not sure we need to keep this comment > > > > > > > > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT) > > > > > > > + rg_flags |= RING_F_SP_ENQ; > > > > > > > + if (mp->flags & MEMPOOL_F_SC_GET) > > > > > > > + rg_flags |= RING_F_SC_DEQ; > > > > > > > + > > > > > > > + return ring_alloc(mp, rg_flags); > > > > > > > +} > > > > > > > + > > > > > > > +static int > > > > > > > +rts_ring_alloc(struct rte_mempool *mp) > > > > > > > +{ > > > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0) > > > > > > > + return -EINVAL; > > > > > > > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even > > > > > > if it's not optimal)? > > > > > > > > > > These new sync modes (RTS, HTS) are for MT. > > > > > For SP/SC - there is simply no point to use MT sync modes. > > > > > I suppose there are few choices: > > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour > > > > > and create actual ring with ST sync mode for prod/cons. > > > > > 2. Report an error. > > > > > 3. Silently ignore these flags. > > > > > > > > > > As I can see for "ring_mp_mc" ops, we doing #1, > > > > > while for "stack" we are doing #3. > > > > > For RTS/HTS I chosoe #2, as it seems cleaner to me. > > > > > Any thoughts from your side what preferable behaviour should be? 
> > > > > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select > > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc, > > > > ring_mp_mc). > > > > > > As I understand, nothing prevents user from doing: > > > > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size, > > > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0); > > > > Apologies, hit send accidently. > > I meant user can do: > > > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET); > > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); > > > > An in that case, he'll get SP/SC ring underneath. > > It looks it's not the case. Since commit 449c49b93a6b ("mempool: support > handler operations"), the flags SP_PUT/SC_GET are converted into a call > to rte_mempool_set_ops_byname() in rte_mempool_create() only. > > In rte_mempool_create_empty(), these flags are ignored. It is expected > that the user calls rte_mempool_set_ops_byname() by itself. As I understand the code - not exactly. rte_mempool_create_empty() doesn't make any specific actions based on 'flags' value, but it does store it's value inside mp->flags. Later, when mempool_ops_alloc_once() is called these flags will be used by common_ring_alloc() and might override selected by ops ring behaviour. > > I don't think it is a good behavior: > > 1/ The documentation of rte_mempool_create_empty() does not say that the > flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET > sets the default ops like rte_mempool_create(). > > 2/ If rte_mempool_set_ops_byname() is not called after > rte_mempool_create_empty() (and it looks it happens in dpdk's code), > the default ops are the ones registered at index 0. This depends on > the link order. > > So I propose to move the following code in > rte_mempool_create_empty(). 
> > if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET)) > ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); > else if (flags & MEMPOOL_F_SP_PUT) > ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); > else if (flags & MEMPOOL_F_SC_GET) > ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); > else > ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); > > What do you think? I think it will be a good thing - as in that case we'll always have "ring_mp_mc" selected as default one. As another thought, it porbably would be good to deprecate and later remove MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely. These days user can select this behaviour via mempool ops and such dualism just makes things more error-prone and harder to maintain. Especially as we don't have clear policy what should be the higher priority for sync mode selection: mempool ops or flags.
On Mon, Jul 13, 2020 at 02:46:35PM +0000, Ananyev, Konstantin wrote:
> Hi Olivier,
>
> > Hi Konstantin,
> >
> > On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> > >
> > >
> > > >
> > > > Hi Olivier,
> > > >
> > > > > Hi Konstantin,
> > > > >
> > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > > Hi Olivier,
> > > > > >
> > > > > > > Hi Konstantin,
> > > > > > >
> > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > > v2:
> > > > > > > > - update Release Notes (as per comments)
> > > > > > > >
> > > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > > This change provides user with ability to select these
> > > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > > >
> > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > > > ---
> > > > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > > >
> > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > > > * Dump ``rte_flow`` memory consumption.
> > > > > > > > * Measure packet per second forwarding.
> > > > > > > >
> > > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > > +
> > > > > > > > + Added ability to select new ring synchronisation modes:
> > > > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > > + via mempool ops API.
> > > > > > > > +
> > > > > > > >
> > > > > > > > Removed Items
> > > > > > > > -------------
> > > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > + unsigned int n)
> > > > > > > > +{
> > > > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > + unsigned int n)
> > > > > > > > +{
> > > > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > static int
> > > > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > {
> > > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > +{
> > > > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > +{
> > > > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > static unsigned
> > > > > > > > common_ring_get_count(const struct rte_mempool *mp)
> > > > > > > > {
> > > > > > > > return rte_ring_count(mp->pool_data);
> > > > > > > > }
> > > > > > > >
> > > > > > > > -
> > > > > > > > static int
> > > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > > > {
> > > > > > > > - int rg_flags = 0, ret;
> > > > > > > > + int ret;
> > > > > > > > char rg_name[RTE_RING_NAMESIZE];
> > > > > > > > struct rte_ring *r;
> > > > > > > >
> > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > return -rte_errno;
> > > > > > > > }
> > > > > > > >
> > > > > > > > - /* ring flags */
> > > > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > - rg_flags |= RING_F_SP_ENQ;
> > > > > > > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > - rg_flags |= RING_F_SC_DEQ;
> > > > > > > > -
> > > > > > > > /*
> > > > > > > > * Allocate the ring that will be used to store objects.
> > > > > > > > * Ring functions will return appropriate errors if we are
> > > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > return 0;
> > > > > > > > }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +{
> > > > > > > > + uint32_t rg_flags;
> > > > > > > > +
> > > > > > > > + rg_flags = 0;
> > > > > > >
> > > > > > > Maybe it could go on the same line
> > > > > > >
> > > > > > > > +
> > > > > > > > + /* ring flags */
> > > > > > >
> > > > > > > Not sure we need to keep this comment
> > > > > > >
> > > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > + rg_flags |= RING_F_SP_ENQ;
> > > > > > > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > + rg_flags |= RING_F_SC_DEQ;
> > > > > > > > +
> > > > > > > > + return ring_alloc(mp, rg_flags);
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +{
> > > > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > > + return -EINVAL;
> > > > > > >
> > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > > > if it's not optimal)?
> > > > > >
> > > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > > > I suppose there are few choices:
> > > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > > > and create actual ring with ST sync mode for prod/cons.
> > > > > > 2. Report an error.
> > > > > > 3. Silently ignore these flags.
> > > > > >
> > > > > > As I can see for "ring_mp_mc" ops, we doing #1,
> > > > > > while for "stack" we are doing #3.
> > > > > > For RTS/HTS I choose #2, as it seems cleaner to me.
> > > > > > Any thoughts from your side what preferable behaviour should be?
> > > > >
> > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > > > ring_mp_mc).
> > > >
> > > > As I understand, nothing prevents user from doing:
> > > >
> > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > > > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> > >
> > > Apologies, hit send accidently.
> > > I meant user can do:
> > >
> > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > >
> > > > And in that case, he'll get SP/SC ring underneath.
> >
> > It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
> > handler operations"), the flags SP_PUT/SC_GET are converted into a call
> > to rte_mempool_set_ops_byname() in rte_mempool_create() only.
> >
> > In rte_mempool_create_empty(), these flags are ignored. It is expected
> > that the user calls rte_mempool_set_ops_byname() by itself.
>
> As I understand the code - not exactly.
> rte_mempool_create_empty() doesn't make any specific actions based on 'flags' value,
> but it does store its value inside mp->flags.
> Later, when mempool_ops_alloc_once() is called these flags will be used by
> common_ring_alloc() and might override selected by ops ring behaviour.
>
> >
> > I don't think it is a good behavior:
> >
> > 1/ The documentation of rte_mempool_create_empty() does not say that the
> > flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
> > sets the default ops like rte_mempool_create().
> >
> > 2/ If rte_mempool_set_ops_byname() is not called after
> > rte_mempool_create_empty() (and it looks it happens in dpdk's code),
> > the default ops are the ones registered at index 0. This depends on
> > the link order.
> >
> > So I propose to move the following code in
> > rte_mempool_create_empty().
> >
> > if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> > ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> > else if (flags & MEMPOOL_F_SP_PUT)
> > ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> > else if (flags & MEMPOOL_F_SC_GET)
> > ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> > else
> > ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> >
> > What do you think?
>
> I think it will be a good thing - as in that case we'll always have
> "ring_mp_mc" selected as default one.
> As another thought, it probably would be good to deprecate and later remove
> MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
> These days user can select this behaviour via mempool ops and such dualism
> just makes things more error-prone and harder to maintain.
> Especially as we don't have clear policy what should be the higher priority
> for sync mode selection: mempool ops or flags.
>
I'll tend to agree, however it would mean deprecate rte_mempool_create()
too, because we wouldn't be able to set ops with it. Or we would have to
add a 12th (!) argument to the function, to set the ops name.
I don't like having that many arguments to this function, but it seems
it is widely used, probably because it is just one function call (vs
create_empty + set_ops + populate). So adding a "ops_name" argument is
maybe the right thing to do, given we can keep abi compat.
v4: add new section into mempool derivers guide (Thomas Monjalon comments) v3: silently ignore F_SP_PUT/F_SC_GET (Olivier Matz comments) v2: update Release Notes (Gage Eads comments) Konstantin Ananyev (2): doc: add ring based mempool guide mempool/ring: add support for new ring sync modes doc/guides/mempool/index.rst | 1 + doc/guides/mempool/ring.rst | 44 +++++++++++++ doc/guides/rel_notes/release_20_08.rst | 6 ++ drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++--- 4 files changed, 130 insertions(+), 9 deletions(-) create mode 100644 doc/guides/mempool/ring.rst -- 2.17.1
Add documentation for rte_ring mempool driver. Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> --- doc/guides/mempool/index.rst | 1 + doc/guides/mempool/ring.rst | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 doc/guides/mempool/ring.rst diff --git a/doc/guides/mempool/index.rst b/doc/guides/mempool/index.rst index 756610264..bbd02fd81 100644 --- a/doc/guides/mempool/index.rst +++ b/doc/guides/mempool/index.rst @@ -13,3 +13,4 @@ application through the mempool API. octeontx octeontx2 + ring diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst new file mode 100644 index 000000000..b8659c03f --- /dev/null +++ b/doc/guides/mempool/ring.rst @@ -0,0 +1,34 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2020 Intel Corporation. + +Ring Mempool Driver +============================== + +**rte_mempool_ring** is a pure SW mempool driver based on ``rte_ring`` +DPDK library. This is a default mempool driver. +Following modes of operation are available for ``ring`` mempool driver +and can be selected via mempool ops API: + +- ``ring_mp_mc`` + + Underlying **rte_ring** operates in multi-thread producer, + multi-thread consumer sync mode. + +- ``ring_sp_sc`` + + Underlying **rte_ring** operates in single-thread producer, + single-thread consumer sync mode. + +- ``ring_sp_mc`` + + Underlying **rte_ring** operates in single-thread producer, + multi-thread consumer sync mode. + +- ``ring_mp_sc`` + + Underlying **rte_ring** operates in multi-thread producer, + single-thread consumer sync mode. + + +For more information about ``rte_ring`` structure, behaviour and available +synchronisation modes please refer to: :doc:`../prog_guide/ring_lib`. -- 2.17.1
Two new sync modes were introduced into rte_ring: relaxed tail sync (RTS) and head/tail sync (HTS). This change provides user with ability to select these modes for ring based mempool via mempool ops API. Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> Acked-by: Gage Eads <gage.eads@intel.com> --- doc/guides/mempool/ring.rst | 10 +++ doc/guides/rel_notes/release_20_08.rst | 6 ++ drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++--- 3 files changed, 95 insertions(+), 9 deletions(-) diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst index b8659c03f..9e2824070 100644 --- a/doc/guides/mempool/ring.rst +++ b/doc/guides/mempool/ring.rst @@ -29,6 +29,16 @@ and can be selected via mempool ops API: Underlying **rte_ring** operates in multi-thread producer, single-thread consumer sync mode. +- ``ring_mt_rts`` + + For underlying **rte_ring** both producer and consumer operate in + multi-thread Relaxed Tail Sync (RTS) mode. + +- ``ring_mt_hts`` + + For underlying **rte_ring** both producer and consumer operate in + multi-thread Haad-Tail Sync (HTS) mode. + For more information about ``rte_ring`` structure, behaviour and available synchronisation modes please refer to: :doc:`../prog_guide/ring_lib`. diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst index 17d70e7c1..db25c6f9c 100644 --- a/doc/guides/rel_notes/release_20_08.rst +++ b/doc/guides/rel_notes/release_20_08.rst @@ -69,6 +69,12 @@ New Features barriers. rte_*mb APIs, for ARMv8 platforms, are changed to use DMB instruction to reflect this. +* **Added support for new sync modes into mempool ring driver.** + + Added ability to select new ring synchronisation modes: + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` + via mempool ops API. 
+ * **Added the support for vfio-pci new VF token interface.** From Linux 5.7, vfio-pci supports to bind both SR-IOV PF and the created VFs, diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c index bc123fc52..b1f09ff28 100644 --- a/drivers/mempool/ring/rte_mempool_ring.c +++ b/drivers/mempool/ring/rte_mempool_ring.c @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + static int common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) { @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; +} + static unsigned common_ring_get_count(const struct rte_mempool *mp) { return rte_ring_count(mp->pool_data); } - static int -common_ring_alloc(struct rte_mempool *mp) +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) { - int rg_flags = 0, ret; + int ret; char rg_name[RTE_RING_NAMESIZE]; struct rte_ring *r; @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) return -rte_errno; } - /* ring flags */ - if (mp->flags & MEMPOOL_F_SP_PUT) - rg_flags |= RING_F_SP_ENQ; - if (mp->flags & MEMPOOL_F_SC_GET) - rg_flags |= RING_F_SC_DEQ; - /* * Allocate the ring that will be used to store objects. * Ring functions will return appropriate errors if we are @@ -82,6 +105,31 @@ common_ring_alloc(struct rte_mempool *mp) return 0; } +static int +common_ring_alloc(struct rte_mempool *mp) +{ + uint32_t rg_flags = 0; + + if (mp->flags & MEMPOOL_F_SP_PUT) + rg_flags |= RING_F_SP_ENQ; + if (mp->flags & MEMPOOL_F_SC_GET) + rg_flags |= RING_F_SC_DEQ; + + return ring_alloc(mp, rg_flags); +} + +static int +rts_ring_alloc(struct rte_mempool *mp) +{ + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); +} + +static int +hts_ring_alloc(struct rte_mempool *mp) +{ + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); +} + static void common_ring_free(struct rte_mempool *mp) { @@ -130,7 +178,29 @@ static const struct rte_mempool_ops ops_sp_mc = { .get_count = common_ring_get_count, }; +/* ops for mempool with ring in MT_RTS sync mode */ +static const struct rte_mempool_ops ops_mt_rts = { + .name = "ring_mt_rts", + .alloc = rts_ring_alloc, + .free = common_ring_free, + .enqueue = rts_ring_mp_enqueue, + .dequeue = rts_ring_mc_dequeue, + .get_count = common_ring_get_count, +}; + +/* ops for mempool with ring in MT_HTS sync mode */ +static const struct rte_mempool_ops ops_mt_hts = { + .name = "ring_mt_hts", + .alloc = hts_ring_alloc, + .free = common_ring_free, + .enqueue = hts_ring_mp_enqueue, + .dequeue = hts_ring_mc_dequeue, + .get_count = 
common_ring_get_count, +}; + MEMPOOL_REGISTER_OPS(ops_mp_mc); MEMPOOL_REGISTER_OPS(ops_sp_sc); MEMPOOL_REGISTER_OPS(ops_mp_sc); MEMPOOL_REGISTER_OPS(ops_sp_mc); +MEMPOOL_REGISTER_OPS(ops_mt_rts); +MEMPOOL_REGISTER_OPS(ops_mt_hts); -- 2.17.1
> On Mon, Jul 13, 2020 at 02:46:35PM +0000, Ananyev, Konstantin wrote:
> > Hi Olivier,
> >
> > > Hi Konstantin,
> > >
> > > On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> > > >
> > > >
> > > > >
> > > > > Hi Olivier,
> > > > >
> > > > > > Hi Konstantin,
> > > > > >
> > > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > > > Hi Olivier,
> > > > > > >
> > > > > > > > Hi Konstantin,
> > > > > > > >
> > > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > > > v2:
> > > > > > > > > - update Release Notes (as per comments)
> > > > > > > > >
> > > > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > > > This change provides user with ability to select these
> > > > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > > > >
> > > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > > > > ---
> > > > > > > > > doc/guides/rel_notes/release_20_08.rst | 6 ++
> > > > > > > > > drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > > > > 2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > > > >
> > > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > > > > * Dump ``rte_flow`` memory consumption.
> > > > > > > > > * Measure packet per second forwarding.
> > > > > > > > >
> > > > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > > > +
> > > > > > > > > + Added ability to select new ring synchronisation modes:
> > > > > > > > > + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > > > + via mempool ops API.
> > > > > > > > > +
> > > > > > > > >
> > > > > > > > > Removed Items
> > > > > > > > > -------------
> > > > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > + unsigned int n)
> > > > > > > > > +{
> > > > > > > > > + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > + unsigned int n)
> > > > > > > > > +{
> > > > > > > > > + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > static int
> > > > > > > > > common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > > {
> > > > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > > obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > > +{
> > > > > > > > > + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > > +{
> > > > > > > > > + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > > > + obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > static unsigned
> > > > > > > > > common_ring_get_count(const struct rte_mempool *mp)
> > > > > > > > > {
> > > > > > > > > return rte_ring_count(mp->pool_data);
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > -
> > > > > > > > > static int
> > > > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > > > > {
> > > > > > > > > - int rg_flags = 0, ret;
> > > > > > > > > + int ret;
> > > > > > > > > char rg_name[RTE_RING_NAMESIZE];
> > > > > > > > > struct rte_ring *r;
> > > > > > > > >
> > > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > return -rte_errno;
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > - /* ring flags */
> > > > > > > > > - if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > > - rg_flags |= RING_F_SP_ENQ;
> > > > > > > > > - if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > > - rg_flags |= RING_F_SC_DEQ;
> > > > > > > > > -
> > > > > > > > > /*
> > > > > > > > > * Allocate the ring that will be used to store objects.
> > > > > > > > > * Ring functions will return appropriate errors if we are
> > > > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > return 0;
> > > > > > > > > }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +{
> > > > > > > > > + uint32_t rg_flags;
> > > > > > > > > +
> > > > > > > > > + rg_flags = 0;
> > > > > > > >
> > > > > > > > Maybe it could go on the same line
> > > > > > > >
> > > > > > > > > +
> > > > > > > > > + /* ring flags */
> > > > > > > >
> > > > > > > > Not sure we need to keep this comment
> > > > > > > >
> > > > > > > > > + if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > > + rg_flags |= RING_F_SP_ENQ;
> > > > > > > > > + if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > > + rg_flags |= RING_F_SC_DEQ;
> > > > > > > > > +
> > > > > > > > > + return ring_alloc(mp, rg_flags);
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +{
> > > > > > > > > + if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > > > + return -EINVAL;
> > > > > > > >
> > > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > > > > if it's not optimal)?
> > > > > > >
> > > > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > > > > I suppose there are few choices:
> > > > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > > > > and create actual ring with ST sync mode for prod/cons.
> > > > > > > 2. Report an error.
> > > > > > > 3. Silently ignore these flags.
> > > > > > >
> > > > > > > As I can see for "ring_mp_mc" ops, we doing #1,
> > > > > > > while for "stack" we are doing #3.
> > > > > > > For RTS/HTS I choose #2, as it seems cleaner to me.
> > > > > > > Any thoughts from your side what preferable behaviour should be?
> > > > > >
> > > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > > > > ring_mp_mc).
> > > > >
> > > > > As I understand, nothing prevents user from doing:
> > > > >
> > > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > > > > sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> > > >
> > > > Apologies, hit send accidently.
> > > > I meant user can do:
> > > >
> > > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > > > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > > >
> > > > An in that case, he'll get SP/SC ring underneath.
> > >
> > > It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
> > > handler operations"), the flags SP_PUT/SC_GET are converted into a call
> > > to rte_mempool_set_ops_byname() in rte_mempool_create() only.
> > >
> > > In rte_mempool_create_empty(), these flags are ignored. It is expected
> > > that the user calls rte_mempool_set_ops_byname() by itself.
> >
> > As I understand the code - not exactly.
> > rte_mempool_create_empty() doesn't take any specific actions based on 'flags' value,
> > but it does store its value inside mp->flags.
> > Later, when mempool_ops_alloc_once() is called these flags will be used by
> > common_ring_alloc() and might override selected by ops ring behaviour.
> >
> > >
> > > I don't think it is a good behavior:
> > >
> > > 1/ The documentation of rte_mempool_create_empty() does not say that the
> > > flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
> > > sets the default ops like rte_mempool_create().
> > >
> > > 2/ If rte_mempool_set_ops_byname() is not called after
> > > rte_mempool_create_empty() (and it looks it happens in dpdk's code),
> > > the default ops are the ones registered at index 0. This depends on
> > > the link order.
> > >
> > > So I propose to move the following code in
> > > rte_mempool_create_empty().
> > >
> > > if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> > > ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> > > else if (flags & MEMPOOL_F_SP_PUT)
> > > ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> > > else if (flags & MEMPOOL_F_SC_GET)
> > > ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> > > else
> > > ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > >
> > > What do you think?
> >
> > I think it will be a good thing - as in that case we'll always have
> > "ring_mp_mc" selected as default one.
> > As another thought, it probably would be good to deprecate and later remove
> > MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
> > These days user can select this behaviour via mempool ops and such dualism
> > just makes things more error-prone and harder to maintain.
> > Especially as we don't have clear policy what should be the higher priority
> > for sync mode selection: mempool ops or flags.
> >
>
> I'll tend to agree, however it would mean deprecate rte_mempool_create()
> too, because we wouldn't be able to set ops with it. Or we would have to
> add a 12th (!) argument to the function, to set the ops name.
>
> I don't like having that many arguments to this function, but it seems
> it is widely used, probably because it is just one function call (vs
> create_empty + set_ops + populate). So adding a "ops_name" argument is
> maybe the right thing to do, given we can keep abi compat.
My thought was - just keep rte_mempool_create()
parameter list as it is, and always set ops to "ring_mp_mc" for it.
Users who'd like some other ops would be forced to use
create_empty+set_ops+populate.
That's pretty much the same what we have right now,
the only diff will be ring with SP/SC mode.
13/07/2020 17:50, Konstantin Ananyev:
> Two new sync modes were introduced into rte_ring:
> relaxed tail sync (RTS) and head/tail sync (HTS).
> This change provides user with ability to select these
> modes for ring based mempool via mempool ops API.
>
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> Acked-by: Gage Eads <gage.eads@intel.com>
> ---
> doc/guides/mempool/ring.rst | 10 +++
> doc/guides/rel_notes/release_20_08.rst | 6 ++
> drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++---
> 3 files changed, 95 insertions(+), 9 deletions(-)
>
> diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst
> index b8659c03f..9e2824070 100644
> --- a/doc/guides/mempool/ring.rst
> +++ b/doc/guides/mempool/ring.rst
> @@ -29,6 +29,16 @@ and can be selected via mempool ops API:
> Underlying **rte_ring** operates in multi-thread producer,
> single-thread consumer sync mode.
>
> +- ``ring_mt_rts``
> +
> + For underlying **rte_ring** both producer and consumer operate in
> + multi-thread Relaxed Tail Sync (RTS) mode.
> +
> +- ``ring_mt_hts``
> +
> + For underlying **rte_ring** both producer and consumer operate in
> + multi-thread Haad-Tail Sync (HTS) mode.
Typo Haad -> Head
Please explain what these modes mean and why/when use them.
> > 13/07/2020 17:50, Konstantin Ananyev: > > Two new sync modes were introduced into rte_ring: > > relaxed tail sync (RTS) and head/tail sync (HTS). > > This change provides user with ability to select these > > modes for ring based mempool via mempool ops API. > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > > Acked-by: Gage Eads <gage.eads@intel.com> > > --- > > doc/guides/mempool/ring.rst | 10 +++ > > doc/guides/rel_notes/release_20_08.rst | 6 ++ > > drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++--- > > 3 files changed, 95 insertions(+), 9 deletions(-) > > > > diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst > > index b8659c03f..9e2824070 100644 > > --- a/doc/guides/mempool/ring.rst > > +++ b/doc/guides/mempool/ring.rst > > @@ -29,6 +29,16 @@ and can be selected via mempool ops API: > > Underlying **rte_ring** operates in multi-thread producer, > > single-thread consumer sync mode. > > > > +- ``ring_mt_rts`` > > + > > + For underlying **rte_ring** both producer and consumer operate in > > + multi-thread Relaxed Tail Sync (RTS) mode. > > + > > +- ``ring_mt_hts`` > > + > > + For underlying **rte_ring** both producer and consumer operate in > > + multi-thread Haad-Tail Sync (HTS) mode. > > Typo Haad -> Head Will fix. > > Please explain what these modes mean and why/when use them. > We have quite detailed explanation in ring library guide: https://doc.dpdk.org/guides/prog_guide/ring_lib.html 7.6. Producer/consumer synchronization modes So here, I put a link to ring library guide for those who need more details. I can copy&paste same text here too, but it probably not much point in such duplication.
14/07/2020 11:16, Ananyev, Konstantin: > > > > 13/07/2020 17:50, Konstantin Ananyev: > > > Two new sync modes were introduced into rte_ring: > > > relaxed tail sync (RTS) and head/tail sync (HTS). > > > This change provides user with ability to select these > > > modes for ring based mempool via mempool ops API. > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> > > > Acked-by: Gage Eads <gage.eads@intel.com> > > > --- > > > --- a/doc/guides/mempool/ring.rst > > > +++ b/doc/guides/mempool/ring.rst > > > @@ -29,6 +29,16 @@ and can be selected via mempool ops API: > > > Underlying **rte_ring** operates in multi-thread producer, > > > single-thread consumer sync mode. > > > > > > +- ``ring_mt_rts`` > > > + > > > + For underlying **rte_ring** both producer and consumer operate in > > > + multi-thread Relaxed Tail Sync (RTS) mode. > > > + > > > +- ``ring_mt_hts`` > > > + > > > + For underlying **rte_ring** both producer and consumer operate in > > > + multi-thread Haad-Tail Sync (HTS) mode. > > > > Typo Haad -> Head > > Will fix. > > > > > Please explain what these modes mean and why/when use them. > > > > We have quite detailed explanation in ring library guide: > https://doc.dpdk.org/guides/prog_guide/ring_lib.html > 7.6. Producer/consumer synchronization modes > So here, I put a link to ring library guide for those who need more details. > I can copy&paste same text here too, > but it probably not much point in such duplication. Do not duplicate. Please add a direct link to the section: https://doc.dpdk.org/guides/prog_guide/ring_lib.html#mp-rts-mc-rts From a user perspective, I was looking for recommendation about which mode is preferred in which case. Maybe few words about what is an "overcommitted systems"?
v5: few more comments from Thomas regarding the docs v4: add new section into mempool derivers guide (Thomas Monjalon comments) v3: silently ignore F_SP_PUT/F_SC_GET (Olivier Matz comments) v2: update Release Notes (Gage Eads comments) Konstantin Ananyev (2): doc: add ring based mempool guide mempool/ring: add support for new ring sync modes doc/guides/mempool/index.rst | 1 + doc/guides/mempool/ring.rst | 52 +++++++++++++++ doc/guides/prog_guide/ring_lib.rst | 8 +++ doc/guides/rel_notes/release_20_08.rst | 6 ++ drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++--- 5 files changed, 146 insertions(+), 9 deletions(-) create mode 100644 doc/guides/mempool/ring.rst -- 2.17.1
Add documentation for rte_ring mempool driver. Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> --- doc/guides/mempool/index.rst | 1 + doc/guides/mempool/ring.rst | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 doc/guides/mempool/ring.rst diff --git a/doc/guides/mempool/index.rst b/doc/guides/mempool/index.rst index 756610264..bbd02fd81 100644 --- a/doc/guides/mempool/index.rst +++ b/doc/guides/mempool/index.rst @@ -13,3 +13,4 @@ application through the mempool API. octeontx octeontx2 + ring diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst new file mode 100644 index 000000000..b8659c03f --- /dev/null +++ b/doc/guides/mempool/ring.rst @@ -0,0 +1,34 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2020 Intel Corporation. + +Ring Mempool Driver +============================== + +**rte_mempool_ring** is a pure SW mempool driver based on ``rte_ring`` +DPDK library. This is a default mempool driver. +Following modes of operation are available for ``ring`` mempool driver +and can be selected via mempool ops API: + +- ``ring_mp_mc`` + + Underlying **rte_ring** operates in multi-thread producer, + multi-thread consumer sync mode. + +- ``ring_sp_sc`` + + Underlying **rte_ring** operates in single-thread producer, + single-thread consumer sync mode. + +- ``ring_sp_mc`` + + Underlying **rte_ring** operates in single-thread producer, + multi-thread consumer sync mode. + +- ``ring_mp_sc`` + + Underlying **rte_ring** operates in multi-thread producer, + single-thread consumer sync mode. + + +For more information about ``rte_ring`` structure, behaviour and available +synchronisation modes please refer to: :doc:`../prog_guide/ring_lib`. -- 2.17.1
Two new sync modes were introduced into rte_ring: relaxed tail sync (RTS) and head/tail sync (HTS). This change provides user with ability to select these modes for ring based mempool via mempool ops API. Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> Acked-by: Gage Eads <gage.eads@intel.com> --- doc/guides/mempool/ring.rst | 22 ++++++- doc/guides/prog_guide/ring_lib.rst | 8 +++ doc/guides/rel_notes/release_20_08.rst | 6 ++ drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++--- 4 files changed, 113 insertions(+), 11 deletions(-) diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst index b8659c03f..ca03180ea 100644 --- a/doc/guides/mempool/ring.rst +++ b/doc/guides/mempool/ring.rst @@ -12,12 +12,14 @@ and can be selected via mempool ops API: - ``ring_mp_mc`` Underlying **rte_ring** operates in multi-thread producer, - multi-thread consumer sync mode. + multi-thread consumer sync mode. For more information please refer to: + :ref:`Ring_Library_MPMC_Mode`. - ``ring_sp_sc`` Underlying **rte_ring** operates in single-thread producer, - single-thread consumer sync mode. + single-thread consumer sync mode. For more information please refer to: + :ref:`Ring_Library_SPSC_Mode`. - ``ring_sp_mc`` @@ -29,6 +31,22 @@ and can be selected via mempool ops API: Underlying **rte_ring** operates in multi-thread producer, single-thread consumer sync mode. +- ``ring_mt_rts`` + + For underlying **rte_ring** both producer and consumer operate in + multi-thread Relaxed Tail Sync (RTS) mode. For more information please + refer to: :ref:`Ring_Library_MT_RTS_Mode`. + +- ``ring_mt_hts`` + + For underlying **rte_ring** both producer and consumer operate in + multi-thread Head-Tail Sync (HTS) mode. For more information please + refer to: :ref:`Ring_Library_MT_HTS_Mode`. + +For 'classic' DPDK deployments (with one thread per core) ``ring_mp_mc`` +mode is usually the most suitable and the fastest one. 
For overcommitted +scenarios (multiple threads share same set of cores) ``ring_mt_rts`` or +``ring_mt_hts`` usually provide a better alternative. For more information about ``rte_ring`` structure, behaviour and available synchronisation modes please refer to: :doc:`../prog_guide/ring_lib`. diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst index f0a5a78b0..895484d95 100644 --- a/doc/guides/prog_guide/ring_lib.rst +++ b/doc/guides/prog_guide/ring_lib.rst @@ -359,6 +359,8 @@ That should help users to configure ring in the most suitable way for his specific usage scenarios. Currently supported modes: +.. _Ring_Library_MPMC_Mode: + MP/MC (default one) ~~~~~~~~~~~~~~~~~~~ @@ -369,11 +371,15 @@ per core) this is usually the most suitable and fastest synchronization mode. As a well known limitation - it can perform quite pure on some overcommitted scenarios. +.. _Ring_Library_SPSC_Mode: + SP/SC ~~~~~ Single-producer (/single-consumer) mode. In this mode only one thread at a time is allowed to enqueue (/dequeue) objects to (/from) the ring. +.. _Ring_Library_MT_RTS_Mode: + MP_RTS/MC_RTS ~~~~~~~~~~~~~ @@ -390,6 +396,8 @@ one for head update, second for tail update. In comparison the original MP/MC algorithm requires one 32-bit CAS for head update and waiting/spinning on tail value. +.. _Ring_Library_MT_HTS_Mode: + MP_HTS/MC_HTS ~~~~~~~~~~~~~ diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst index 17d70e7c1..db25c6f9c 100644 --- a/doc/guides/rel_notes/release_20_08.rst +++ b/doc/guides/rel_notes/release_20_08.rst @@ -69,6 +69,12 @@ New Features barriers. rte_*mb APIs, for ARMv8 platforms, are changed to use DMB instruction to reflect this. +* **Added support for new sync modes into mempool ring driver.** + + Added ability to select new ring synchronisation modes: + ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)`` + via mempool ops API. 
+ * **Added the support for vfio-pci new VF token interface.** From Linux 5.7, vfio-pci supports to bind both SR-IOV PF and the created VFs, diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c index bc123fc52..b1f09ff28 100644 --- a/drivers/mempool/ring/rte_mempool_ring.c +++ b/drivers/mempool/ring/rte_mempool_ring.c @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table, obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_rts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table, + unsigned int n) +{ + return rte_ring_mp_hts_enqueue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + static int common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) { @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n) obj_table, n, NULL) == 0 ? -ENOBUFS : 0; } +static int +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_rts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? -ENOBUFS : 0; +} + +static int +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n) +{ + return rte_ring_mc_hts_dequeue_bulk(mp->pool_data, + obj_table, n, NULL) == 0 ? 
-ENOBUFS : 0; +} + static unsigned common_ring_get_count(const struct rte_mempool *mp) { return rte_ring_count(mp->pool_data); } - static int -common_ring_alloc(struct rte_mempool *mp) +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags) { - int rg_flags = 0, ret; + int ret; char rg_name[RTE_RING_NAMESIZE]; struct rte_ring *r; @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp) return -rte_errno; } - /* ring flags */ - if (mp->flags & MEMPOOL_F_SP_PUT) - rg_flags |= RING_F_SP_ENQ; - if (mp->flags & MEMPOOL_F_SC_GET) - rg_flags |= RING_F_SC_DEQ; - /* * Allocate the ring that will be used to store objects. * Ring functions will return appropriate errors if we are @@ -82,6 +105,31 @@ common_ring_alloc(struct rte_mempool *mp) return 0; } +static int +common_ring_alloc(struct rte_mempool *mp) +{ + uint32_t rg_flags = 0; + + if (mp->flags & MEMPOOL_F_SP_PUT) + rg_flags |= RING_F_SP_ENQ; + if (mp->flags & MEMPOOL_F_SC_GET) + rg_flags |= RING_F_SC_DEQ; + + return ring_alloc(mp, rg_flags); +} + +static int +rts_ring_alloc(struct rte_mempool *mp) +{ + return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ); +} + +static int +hts_ring_alloc(struct rte_mempool *mp) +{ + return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ); +} + static void common_ring_free(struct rte_mempool *mp) { @@ -130,7 +178,29 @@ static const struct rte_mempool_ops ops_sp_mc = { .get_count = common_ring_get_count, }; +/* ops for mempool with ring in MT_RTS sync mode */ +static const struct rte_mempool_ops ops_mt_rts = { + .name = "ring_mt_rts", + .alloc = rts_ring_alloc, + .free = common_ring_free, + .enqueue = rts_ring_mp_enqueue, + .dequeue = rts_ring_mc_dequeue, + .get_count = common_ring_get_count, +}; + +/* ops for mempool with ring in MT_HTS sync mode */ +static const struct rte_mempool_ops ops_mt_hts = { + .name = "ring_mt_hts", + .alloc = hts_ring_alloc, + .free = common_ring_free, + .enqueue = hts_ring_mp_enqueue, + .dequeue = hts_ring_mc_dequeue, + .get_count = 
common_ring_get_count, +}; + MEMPOOL_REGISTER_OPS(ops_mp_mc); MEMPOOL_REGISTER_OPS(ops_sp_sc); MEMPOOL_REGISTER_OPS(ops_mp_sc); MEMPOOL_REGISTER_OPS(ops_sp_mc); +MEMPOOL_REGISTER_OPS(ops_mt_rts); +MEMPOOL_REGISTER_OPS(ops_mt_hts); -- 2.17.1
On Wed, Jul 15, 2020 at 4:59 PM Konstantin Ananyev
<konstantin.ananyev@intel.com> wrote:
>
> Two new sync modes were introduced into rte_ring:
> relaxed tail sync (RTS) and head/tail sync (HTS).
> This change provides user with ability to select these
> modes for ring based mempool via mempool ops API.
>
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> Acked-by: Gage Eads <gage.eads@intel.com>
No change in behavior for existing users so I went and took it for -rc2.
Series applied, thanks.
--
David Marchand