DPDK patches and discussions
* [dpdk-dev] [PATCH 20.08] mempool/ring: add support for new ring sync modes
@ 2020-05-21 13:20 Konstantin Ananyev
  2020-06-29 16:10 ` [dpdk-dev] [PATCH v2] " Konstantin Ananyev
  0 siblings, 1 reply; 25+ messages in thread
From: Konstantin Ananyev @ 2020-05-21 13:20 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, arybchenko, jielong.zjl, Konstantin Ananyev

Two new sync modes were introduced into rte_ring:
relaxed tail sync (RTS) and head/tail sync (HTS).
This change provides the user with the ability to select these
modes for a ring-based mempool via the mempool ops API.
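
For example (illustrative snippet only, not part of the patch), an
application can opt into one of the new modes by name; "ring_mt_rts"
below can be replaced with "ring_mt_hts":

	/* needs rte_mempool.h; names and sizes here are arbitrary examples */
	static struct rte_mempool *
	create_rts_pool(const char *name, unsigned int n, unsigned int elt_size,
		int socket_id)
	{
		struct rte_mempool *mp;

		mp = rte_mempool_create_empty(name, n, elt_size,
				256, 0, socket_id, 0);
		if (mp == NULL)
			return NULL;

		if (rte_mempool_set_ops_byname(mp, "ring_mt_rts", NULL) != 0 ||
				rte_mempool_populate_default(mp) < 0) {
			rte_mempool_free(mp);
			return NULL;
		}
		return mp;
	}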

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
 1 file changed, 88 insertions(+), 9 deletions(-)

diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
index bc123fc52..15ec7dee7 100644
--- a/drivers/mempool/ring/rte_mempool_ring.c
+++ b/drivers/mempool/ring/rte_mempool_ring.c
@@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static int
 common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
@@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static unsigned
 common_ring_get_count(const struct rte_mempool *mp)
 {
 	return rte_ring_count(mp->pool_data);
 }
 
-
 static int
-common_ring_alloc(struct rte_mempool *mp)
+ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
 {
-	int rg_flags = 0, ret;
+	int ret;
 	char rg_name[RTE_RING_NAMESIZE];
 	struct rte_ring *r;
 
@@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
 		return -rte_errno;
 	}
 
-	/* ring flags */
-	if (mp->flags & MEMPOOL_F_SP_PUT)
-		rg_flags |= RING_F_SP_ENQ;
-	if (mp->flags & MEMPOOL_F_SC_GET)
-		rg_flags |= RING_F_SC_DEQ;
-
 	/*
 	 * Allocate the ring that will be used to store objects.
 	 * Ring functions will return appropriate errors if we are
@@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
 	return 0;
 }
 
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+	uint32_t rg_flags;
+
+	rg_flags = 0;
+
+	/* ring flags */
+	if (mp->flags & MEMPOOL_F_SP_PUT)
+		rg_flags |= RING_F_SP_ENQ;
+	if (mp->flags & MEMPOOL_F_SC_GET)
+		rg_flags |= RING_F_SC_DEQ;
+
+	return ring_alloc(mp, rg_flags);
+}
+
+static int
+rts_ring_alloc(struct rte_mempool *mp)
+{
+	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
+		return -EINVAL;
+
+	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
+}
+
+static int
+hts_ring_alloc(struct rte_mempool *mp)
+{
+	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
+		return -EINVAL;
+
+	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
+}
+
 static void
 common_ring_free(struct rte_mempool *mp)
 {
@@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
 	.get_count = common_ring_get_count,
 };
 
+/* ops for mempool with ring in MT_RTS sync mode */
+static const struct rte_mempool_ops ops_mt_rts = {
+	.name = "ring_mt_rts",
+	.alloc = rts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = rts_ring_mp_enqueue,
+	.dequeue = rts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
+/* ops for mempool with ring in MT_HTS sync mode */
+static const struct rte_mempool_ops ops_mt_hts = {
+	.name = "ring_mt_hts",
+	.alloc = hts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = hts_ring_mp_enqueue,
+	.dequeue = hts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
 MEMPOOL_REGISTER_OPS(ops_mp_mc);
 MEMPOOL_REGISTER_OPS(ops_sp_sc);
 MEMPOOL_REGISTER_OPS(ops_mp_sc);
 MEMPOOL_REGISTER_OPS(ops_sp_mc);
+MEMPOOL_REGISTER_OPS(ops_mt_rts);
+MEMPOOL_REGISTER_OPS(ops_mt_hts);
-- 
2.17.1



* [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-05-21 13:20 [dpdk-dev] [PATCH 20.08] mempool/ring: add support for new ring sync modes Konstantin Ananyev
@ 2020-06-29 16:10 ` Konstantin Ananyev
  2020-07-09 16:18   ` Olivier Matz
  2020-07-10 16:21   ` [dpdk-dev] [PATCH v3] " Konstantin Ananyev
  0 siblings, 2 replies; 25+ messages in thread
From: Konstantin Ananyev @ 2020-06-29 16:10 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, arybchenko, jielong.zjl, gage.eads, Konstantin Ananyev

v2:
 - update Release Notes (as per comments)

Two new sync modes were introduced into rte_ring:
relaxed tail sync (RTS) and head/tail sync (HTS).
This change provides the user with the ability to select these
modes for a ring-based mempool via the mempool ops API.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Gage Eads <gage.eads@intel.com>
---
 doc/guides/rel_notes/release_20_08.rst  |  6 ++
 drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
 2 files changed, 94 insertions(+), 9 deletions(-)

diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index eaaf11c37..7bdcf3aac 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -84,6 +84,12 @@ New Features
   * Dump ``rte_flow`` memory consumption.
   * Measure packet per second forwarding.
 
+* **Added support for new sync modes into mempool ring driver.**
+
+  Added ability to select new ring synchronisation modes:
+  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
+  via mempool ops API.
+
 
 Removed Items
 -------------
diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
index bc123fc52..15ec7dee7 100644
--- a/drivers/mempool/ring/rte_mempool_ring.c
+++ b/drivers/mempool/ring/rte_mempool_ring.c
@@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static int
 common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
@@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static unsigned
 common_ring_get_count(const struct rte_mempool *mp)
 {
 	return rte_ring_count(mp->pool_data);
 }
 
-
 static int
-common_ring_alloc(struct rte_mempool *mp)
+ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
 {
-	int rg_flags = 0, ret;
+	int ret;
 	char rg_name[RTE_RING_NAMESIZE];
 	struct rte_ring *r;
 
@@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
 		return -rte_errno;
 	}
 
-	/* ring flags */
-	if (mp->flags & MEMPOOL_F_SP_PUT)
-		rg_flags |= RING_F_SP_ENQ;
-	if (mp->flags & MEMPOOL_F_SC_GET)
-		rg_flags |= RING_F_SC_DEQ;
-
 	/*
 	 * Allocate the ring that will be used to store objects.
 	 * Ring functions will return appropriate errors if we are
@@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
 	return 0;
 }
 
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+	uint32_t rg_flags;
+
+	rg_flags = 0;
+
+	/* ring flags */
+	if (mp->flags & MEMPOOL_F_SP_PUT)
+		rg_flags |= RING_F_SP_ENQ;
+	if (mp->flags & MEMPOOL_F_SC_GET)
+		rg_flags |= RING_F_SC_DEQ;
+
+	return ring_alloc(mp, rg_flags);
+}
+
+static int
+rts_ring_alloc(struct rte_mempool *mp)
+{
+	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
+		return -EINVAL;
+
+	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
+}
+
+static int
+hts_ring_alloc(struct rte_mempool *mp)
+{
+	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
+		return -EINVAL;
+
+	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
+}
+
 static void
 common_ring_free(struct rte_mempool *mp)
 {
@@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
 	.get_count = common_ring_get_count,
 };
 
+/* ops for mempool with ring in MT_RTS sync mode */
+static const struct rte_mempool_ops ops_mt_rts = {
+	.name = "ring_mt_rts",
+	.alloc = rts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = rts_ring_mp_enqueue,
+	.dequeue = rts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
+/* ops for mempool with ring in MT_HTS sync mode */
+static const struct rte_mempool_ops ops_mt_hts = {
+	.name = "ring_mt_hts",
+	.alloc = hts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = hts_ring_mp_enqueue,
+	.dequeue = hts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
 MEMPOOL_REGISTER_OPS(ops_mp_mc);
 MEMPOOL_REGISTER_OPS(ops_sp_sc);
 MEMPOOL_REGISTER_OPS(ops_mp_sc);
 MEMPOOL_REGISTER_OPS(ops_sp_mc);
+MEMPOOL_REGISTER_OPS(ops_mt_rts);
+MEMPOOL_REGISTER_OPS(ops_mt_hts);
-- 
2.17.1



* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-06-29 16:10 ` [dpdk-dev] [PATCH v2] " Konstantin Ananyev
@ 2020-07-09 16:18   ` Olivier Matz
  2020-07-09 17:55     ` Ananyev, Konstantin
  2020-07-10 16:21   ` [dpdk-dev] [PATCH v3] " Konstantin Ananyev
  1 sibling, 1 reply; 25+ messages in thread
From: Olivier Matz @ 2020-07-09 16:18 UTC (permalink / raw)
  To: Konstantin Ananyev; +Cc: dev, arybchenko, jielong.zjl, gage.eads

Hi Konstantin,

On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> v2:
>  - update Release Notes (as per comments)
> 
> Two new sync modes were introduced into rte_ring:
> relaxed tail sync (RTS) and head/tail sync (HTS).
> This change provides user with ability to select these
> modes for ring based mempool via mempool ops API.
> 
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> Acked-by: Gage Eads <gage.eads@intel.com>
> ---
>  doc/guides/rel_notes/release_20_08.rst  |  6 ++
>  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
>  2 files changed, 94 insertions(+), 9 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> index eaaf11c37..7bdcf3aac 100644
> --- a/doc/guides/rel_notes/release_20_08.rst
> +++ b/doc/guides/rel_notes/release_20_08.rst
> @@ -84,6 +84,12 @@ New Features
>    * Dump ``rte_flow`` memory consumption.
>    * Measure packet per second forwarding.
>  
> +* **Added support for new sync modes into mempool ring driver.**
> +
> +  Added ability to select new ring synchronisation modes:
> +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> +  via mempool ops API.
> +
>  
>  Removed Items
>  -------------
> diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> index bc123fc52..15ec7dee7 100644
> --- a/drivers/mempool/ring/rte_mempool_ring.c
> +++ b/drivers/mempool/ring/rte_mempool_ring.c
> @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
>  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
>  }
>  
> +static int
> +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> +	unsigned int n)
> +{
> +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> +}
> +
> +static int
> +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> +	unsigned int n)
> +{
> +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> +}
> +
>  static int
>  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
>  {
> @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
>  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
>  }
>  
> +static int
> +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> +{
> +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> +}
> +
> +static int
> +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> +{
> +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> +}
> +
>  static unsigned
>  common_ring_get_count(const struct rte_mempool *mp)
>  {
>  	return rte_ring_count(mp->pool_data);
>  }
>  
> -
>  static int
> -common_ring_alloc(struct rte_mempool *mp)
> +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
>  {
> -	int rg_flags = 0, ret;
> +	int ret;
>  	char rg_name[RTE_RING_NAMESIZE];
>  	struct rte_ring *r;
>  
> @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
>  		return -rte_errno;
>  	}
>  
> -	/* ring flags */
> -	if (mp->flags & MEMPOOL_F_SP_PUT)
> -		rg_flags |= RING_F_SP_ENQ;
> -	if (mp->flags & MEMPOOL_F_SC_GET)
> -		rg_flags |= RING_F_SC_DEQ;
> -
>  	/*
>  	 * Allocate the ring that will be used to store objects.
>  	 * Ring functions will return appropriate errors if we are
> @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
>  	return 0;
>  }
>  
> +static int
> +common_ring_alloc(struct rte_mempool *mp)
> +{
> +	uint32_t rg_flags;
> +
> +	rg_flags = 0;

Maybe it could go on the same line

> +
> +	/* ring flags */

Not sure we need to keep this comment

> +	if (mp->flags & MEMPOOL_F_SP_PUT)
> +		rg_flags |= RING_F_SP_ENQ;
> +	if (mp->flags & MEMPOOL_F_SC_GET)
> +		rg_flags |= RING_F_SC_DEQ;
> +
> +	return ring_alloc(mp, rg_flags);
> +}
> +
> +static int
> +rts_ring_alloc(struct rte_mempool *mp)
> +{
> +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> +		return -EINVAL;

Why do we need this? Is it a problem to allow sc/sp in this mode (even
if it's not optimal)?

> +
> +	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> +}
> +
> +static int
> +hts_ring_alloc(struct rte_mempool *mp)
> +{
> +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> +		return -EINVAL;
> +
> +	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> +}
> +
>  static void
>  common_ring_free(struct rte_mempool *mp)
>  {
> @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
>  	.get_count = common_ring_get_count,
>  };
>  
> +/* ops for mempool with ring in MT_RTS sync mode */
> +static const struct rte_mempool_ops ops_mt_rts = {
> +	.name = "ring_mt_rts",
> +	.alloc = rts_ring_alloc,
> +	.free = common_ring_free,
> +	.enqueue = rts_ring_mp_enqueue,
> +	.dequeue = rts_ring_mc_dequeue,
> +	.get_count = common_ring_get_count,
> +};
> +
> +/* ops for mempool with ring in MT_HTS sync mode */
> +static const struct rte_mempool_ops ops_mt_hts = {
> +	.name = "ring_mt_hts",
> +	.alloc = hts_ring_alloc,
> +	.free = common_ring_free,
> +	.enqueue = hts_ring_mp_enqueue,
> +	.dequeue = hts_ring_mc_dequeue,
> +	.get_count = common_ring_get_count,
> +};
> +
>  MEMPOOL_REGISTER_OPS(ops_mp_mc);
>  MEMPOOL_REGISTER_OPS(ops_sp_sc);
>  MEMPOOL_REGISTER_OPS(ops_mp_sc);
>  MEMPOOL_REGISTER_OPS(ops_sp_mc);
> +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> +MEMPOOL_REGISTER_OPS(ops_mt_hts);

Not really related to your patch, but I think we need a function to
dump the names of the available mempool ops. We could even add a description.
The problem we have is that a user does not know on which criteria it
should use one driver or another (except for platform drivers).
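
Something like the rough sketch below (iterating the public
rte_mempool_ops_table; the function name is just a placeholder) would
already be a good start:

	#include <stdio.h>
	#include <rte_mempool.h>

	/* print the names of all registered mempool ops */
	static void
	mempool_ops_dump(FILE *f)
	{
		unsigned int i;

		for (i = 0; i < rte_mempool_ops_table.num_ops; i++)
			fprintf(f, "mempool ops[%u]: %s\n", i,
				rte_mempool_ops_table.ops[i].name);
	}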


Olivier


* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-07-09 16:18   ` Olivier Matz
@ 2020-07-09 17:55     ` Ananyev, Konstantin
  2020-07-10 12:52       ` Olivier Matz
  0 siblings, 1 reply; 25+ messages in thread
From: Ananyev, Konstantin @ 2020-07-09 17:55 UTC (permalink / raw)
  To: Olivier Matz; +Cc: dev, arybchenko, jielong.zjl, Eads, Gage

Hi Olivier,
 
> Hi Konstantin,
> 
> On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > v2:
> >  - update Release Notes (as per comments)
> >
> > Two new sync modes were introduced into rte_ring:
> > relaxed tail sync (RTS) and head/tail sync (HTS).
> > This change provides user with ability to select these
> > modes for ring based mempool via mempool ops API.
> >
> > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > Acked-by: Gage Eads <gage.eads@intel.com>
> > ---
> >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> >  2 files changed, 94 insertions(+), 9 deletions(-)
> >
> > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > index eaaf11c37..7bdcf3aac 100644
> > --- a/doc/guides/rel_notes/release_20_08.rst
> > +++ b/doc/guides/rel_notes/release_20_08.rst
> > @@ -84,6 +84,12 @@ New Features
> >    * Dump ``rte_flow`` memory consumption.
> >    * Measure packet per second forwarding.
> >
> > +* **Added support for new sync modes into mempool ring driver.**
> > +
> > +  Added ability to select new ring synchronisation modes:
> > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > +  via mempool ops API.
> > +
> >
> >  Removed Items
> >  -------------
> > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > index bc123fc52..15ec7dee7 100644
> > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> >  }
> >
> > +static int
> > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > +	unsigned int n)
> > +{
> > +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> > +static int
> > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > +	unsigned int n)
> > +{
> > +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> >  static int
> >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> >  {
> > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> >  }
> >
> > +static int
> > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > +{
> > +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> > +static int
> > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > +{
> > +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> >  static unsigned
> >  common_ring_get_count(const struct rte_mempool *mp)
> >  {
> >  	return rte_ring_count(mp->pool_data);
> >  }
> >
> > -
> >  static int
> > -common_ring_alloc(struct rte_mempool *mp)
> > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> >  {
> > -	int rg_flags = 0, ret;
> > +	int ret;
> >  	char rg_name[RTE_RING_NAMESIZE];
> >  	struct rte_ring *r;
> >
> > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> >  		return -rte_errno;
> >  	}
> >
> > -	/* ring flags */
> > -	if (mp->flags & MEMPOOL_F_SP_PUT)
> > -		rg_flags |= RING_F_SP_ENQ;
> > -	if (mp->flags & MEMPOOL_F_SC_GET)
> > -		rg_flags |= RING_F_SC_DEQ;
> > -
> >  	/*
> >  	 * Allocate the ring that will be used to store objects.
> >  	 * Ring functions will return appropriate errors if we are
> > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> >  	return 0;
> >  }
> >
> > +static int
> > +common_ring_alloc(struct rte_mempool *mp)
> > +{
> > +	uint32_t rg_flags;
> > +
> > +	rg_flags = 0;
> 
> Maybe it could go on the same line
> 
> > +
> > +	/* ring flags */
> 
> Not sure we need to keep this comment
> 
> > +	if (mp->flags & MEMPOOL_F_SP_PUT)
> > +		rg_flags |= RING_F_SP_ENQ;
> > +	if (mp->flags & MEMPOOL_F_SC_GET)
> > +		rg_flags |= RING_F_SC_DEQ;
> > +
> > +	return ring_alloc(mp, rg_flags);
> > +}
> > +
> > +static int
> > +rts_ring_alloc(struct rte_mempool *mp)
> > +{
> > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > +		return -EINVAL;
> 
> Why do we need this? It is a problem to allow sc/sp in this mode (even
> if it's not optimal)?

These new sync modes (RTS, HTS) are for MT.
For SP/SC there is simply no point in using MT sync modes.
I suppose there are a few choices:
1. Make the F_SP_PUT/F_SC_GET flags silently override the expected ops
   behaviour and create the actual ring with ST sync mode for prod/cons.
2. Report an error.
3. Silently ignore these flags.

As I can see, for the "ring_mp_mc" ops we are doing #1,
while for "stack" we are doing #3.
For RTS/HTS I chose #2, as it seems cleaner to me.
Any thoughts from your side on what the preferable behaviour should be?
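
For reference, a rough sketch of what #1 would look like for RTS
(not what this patch does; it reuses the ring_alloc() helper from
the patch):

	static int
	rts_ring_alloc(struct rte_mempool *mp)
	{
		uint32_t flags = 0;

		/* let the mempool ST flags silently downgrade the sync mode */
		flags |= (mp->flags & MEMPOOL_F_SP_PUT) ?
			RING_F_SP_ENQ : RING_F_MP_RTS_ENQ;
		flags |= (mp->flags & MEMPOOL_F_SC_GET) ?
			RING_F_SC_DEQ : RING_F_MC_RTS_DEQ;

		return ring_alloc(mp, flags);
	}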

> 
> > +
> > +	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> > +}
> > +
> > +static int
> > +hts_ring_alloc(struct rte_mempool *mp)
> > +{
> > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > +		return -EINVAL;
> > +
> > +	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> > +}
> > +
> >  static void
> >  common_ring_free(struct rte_mempool *mp)
> >  {
> > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
> >  	.get_count = common_ring_get_count,
> >  };
> >
> > +/* ops for mempool with ring in MT_RTS sync mode */
> > +static const struct rte_mempool_ops ops_mt_rts = {
> > +	.name = "ring_mt_rts",
> > +	.alloc = rts_ring_alloc,
> > +	.free = common_ring_free,
> > +	.enqueue = rts_ring_mp_enqueue,
> > +	.dequeue = rts_ring_mc_dequeue,
> > +	.get_count = common_ring_get_count,
> > +};
> > +
> > +/* ops for mempool with ring in MT_HTS sync mode */
> > +static const struct rte_mempool_ops ops_mt_hts = {
> > +	.name = "ring_mt_hts",
> > +	.alloc = hts_ring_alloc,
> > +	.free = common_ring_free,
> > +	.enqueue = hts_ring_mp_enqueue,
> > +	.dequeue = hts_ring_mc_dequeue,
> > +	.get_count = common_ring_get_count,
> > +};
> > +
> >  MEMPOOL_REGISTER_OPS(ops_mp_mc);
> >  MEMPOOL_REGISTER_OPS(ops_sp_sc);
> >  MEMPOOL_REGISTER_OPS(ops_mp_sc);
> >  MEMPOOL_REGISTER_OPS(ops_sp_mc);
> > +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> > +MEMPOOL_REGISTER_OPS(ops_mt_hts);
 
> Not really related to your patch, but I think we need a function to
> dump the name of available mempool ops. We could even add a description.
> The problem we have is that a user does not know on which criteria is
> should use a driver or another (except for platform drivers).

Agree, it would be useful.
Though it is probably a subject for a separate patch.



* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-07-09 17:55     ` Ananyev, Konstantin
@ 2020-07-10 12:52       ` Olivier Matz
  2020-07-10 15:15         ` Ananyev, Konstantin
  0 siblings, 1 reply; 25+ messages in thread
From: Olivier Matz @ 2020-07-10 12:52 UTC (permalink / raw)
  To: Ananyev, Konstantin; +Cc: dev, arybchenko, jielong.zjl, Eads, Gage

Hi Konstantin,

On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> Hi Olivier,
>  
> > Hi Konstantin,
> > 
> > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > v2:
> > >  - update Release Notes (as per comments)
> > >
> > > Two new sync modes were introduced into rte_ring:
> > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > This change provides user with ability to select these
> > > modes for ring based mempool via mempool ops API.
> > >
> > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > ---
> > >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> > >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > >  2 files changed, 94 insertions(+), 9 deletions(-)
> > >
> > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > index eaaf11c37..7bdcf3aac 100644
> > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > @@ -84,6 +84,12 @@ New Features
> > >    * Dump ``rte_flow`` memory consumption.
> > >    * Measure packet per second forwarding.
> > >
> > > +* **Added support for new sync modes into mempool ring driver.**
> > > +
> > > +  Added ability to select new ring synchronisation modes:
> > > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > +  via mempool ops API.
> > > +
> > >
> > >  Removed Items
> > >  -------------
> > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > index bc123fc52..15ec7dee7 100644
> > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > >  }
> > >
> > > +static int
> > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > +	unsigned int n)
> > > +{
> > > +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > +}
> > > +
> > > +static int
> > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > +	unsigned int n)
> > > +{
> > > +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > +}
> > > +
> > >  static int
> > >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > >  {
> > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > >  }
> > >
> > > +static int
> > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > +{
> > > +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > +}
> > > +
> > > +static int
> > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > +{
> > > +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > +}
> > > +
> > >  static unsigned
> > >  common_ring_get_count(const struct rte_mempool *mp)
> > >  {
> > >  	return rte_ring_count(mp->pool_data);
> > >  }
> > >
> > > -
> > >  static int
> > > -common_ring_alloc(struct rte_mempool *mp)
> > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > >  {
> > > -	int rg_flags = 0, ret;
> > > +	int ret;
> > >  	char rg_name[RTE_RING_NAMESIZE];
> > >  	struct rte_ring *r;
> > >
> > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > >  		return -rte_errno;
> > >  	}
> > >
> > > -	/* ring flags */
> > > -	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > -		rg_flags |= RING_F_SP_ENQ;
> > > -	if (mp->flags & MEMPOOL_F_SC_GET)
> > > -		rg_flags |= RING_F_SC_DEQ;
> > > -
> > >  	/*
> > >  	 * Allocate the ring that will be used to store objects.
> > >  	 * Ring functions will return appropriate errors if we are
> > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > >  	return 0;
> > >  }
> > >
> > > +static int
> > > +common_ring_alloc(struct rte_mempool *mp)
> > > +{
> > > +	uint32_t rg_flags;
> > > +
> > > +	rg_flags = 0;
> > 
> > Maybe it could go on the same line
> > 
> > > +
> > > +	/* ring flags */
> > 
> > Not sure we need to keep this comment
> > 
> > > +	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > +		rg_flags |= RING_F_SP_ENQ;
> > > +	if (mp->flags & MEMPOOL_F_SC_GET)
> > > +		rg_flags |= RING_F_SC_DEQ;
> > > +
> > > +	return ring_alloc(mp, rg_flags);
> > > +}
> > > +
> > > +static int
> > > +rts_ring_alloc(struct rte_mempool *mp)
> > > +{
> > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > +		return -EINVAL;
> > 
> > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > if it's not optimal)?
> 
> These new sync modes (RTS, HTS) are for MT.
> For SP/SC - there is simply no point to use MT sync modes.
> I suppose there are few choices:
> 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
>    and create actual ring with ST sync mode for prod/cons. 
> 2. Report an error.
> 3. Silently ignore these flags.
> 
> As I can see for  "ring_mp_mc" ops, we doing #1, 
> while for "stack" we are doing #3.
> For RTS/HTS I chosoe #2, as it seems cleaner to me.
> Any thoughts from your side what preferable behaviour should be?

The F_SP_PUT/F_SC_GET flags are only used in rte_mempool_create() to select
the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
ring_mp_mc). I don't think we should look at them when using specific ops.

So I tend to say 3. is the correct thing to do.
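
For reference, the flag-to-ops mapping in rte_mempool_create() is
roughly the following (paraphrased from memory, please check the
actual code in rte_mempool.c):

	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
	else if (flags & MEMPOOL_F_SP_PUT)
		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
	else if (flags & MEMPOOL_F_SC_GET)
		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
	else
		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);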


> 
> > 
> > > +
> > > +	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> > > +}
> > > +
> > > +static int
> > > +hts_ring_alloc(struct rte_mempool *mp)
> > > +{
> > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > +		return -EINVAL;
> > > +
> > > +	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> > > +}
> > > +
> > >  static void
> > >  common_ring_free(struct rte_mempool *mp)
> > >  {
> > > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
> > >  	.get_count = common_ring_get_count,
> > >  };
> > >
> > > +/* ops for mempool with ring in MT_RTS sync mode */
> > > +static const struct rte_mempool_ops ops_mt_rts = {
> > > +	.name = "ring_mt_rts",
> > > +	.alloc = rts_ring_alloc,
> > > +	.free = common_ring_free,
> > > +	.enqueue = rts_ring_mp_enqueue,
> > > +	.dequeue = rts_ring_mc_dequeue,
> > > +	.get_count = common_ring_get_count,
> > > +};
> > > +
> > > +/* ops for mempool with ring in MT_HTS sync mode */
> > > +static const struct rte_mempool_ops ops_mt_hts = {
> > > +	.name = "ring_mt_hts",
> > > +	.alloc = hts_ring_alloc,
> > > +	.free = common_ring_free,
> > > +	.enqueue = hts_ring_mp_enqueue,
> > > +	.dequeue = hts_ring_mc_dequeue,
> > > +	.get_count = common_ring_get_count,
> > > +};
> > > +
> > >  MEMPOOL_REGISTER_OPS(ops_mp_mc);
> > >  MEMPOOL_REGISTER_OPS(ops_sp_sc);
> > >  MEMPOOL_REGISTER_OPS(ops_mp_sc);
> > >  MEMPOOL_REGISTER_OPS(ops_sp_mc);
> > > +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> > > +MEMPOOL_REGISTER_OPS(ops_mt_hts);
>  
> > Not really related to your patch, but I think we need a function to
> > dump the name of available mempool ops. We could even add a description.
> > The problem we have is that a user does not know on which criteria is
> > should use a driver or another (except for platform drivers).
> 
> Agree, it will be usefull.
> Though it probably subject for a separate patch.
> 


* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-07-10 12:52       ` Olivier Matz
@ 2020-07-10 15:15         ` Ananyev, Konstantin
  2020-07-10 15:20           ` Ananyev, Konstantin
  0 siblings, 1 reply; 25+ messages in thread
From: Ananyev, Konstantin @ 2020-07-10 15:15 UTC (permalink / raw)
  To: Olivier Matz; +Cc: dev, arybchenko, jielong.zjl, Eads, Gage

Hi Olivier,
 
> Hi Konstantin,
> 
> On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > Hi Olivier,
> >
> > > Hi Konstantin,
> > >
> > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > v2:
> > > >  - update Release Notes (as per comments)
> > > >
> > > > Two new sync modes were introduced into rte_ring:
> > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > This change provides user with ability to select these
> > > > modes for ring based mempool via mempool ops API.
> > > >
> > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > ---
> > > >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> > > >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > >  2 files changed, 94 insertions(+), 9 deletions(-)
> > > >
> > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > index eaaf11c37..7bdcf3aac 100644
> > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > @@ -84,6 +84,12 @@ New Features
> > > >    * Dump ``rte_flow`` memory consumption.
> > > >    * Measure packet per second forwarding.
> > > >
> > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > +
> > > > +  Added ability to select new ring synchronisation modes:
> > > > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > +  via mempool ops API.
> > > > +
> > > >
> > > >  Removed Items
> > > >  -------------
> > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > index bc123fc52..15ec7dee7 100644
> > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > >  }
> > > >
> > > > +static int
> > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > +	unsigned int n)
> > > > +{
> > > > +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > +}
> > > > +
> > > > +static int
> > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > +	unsigned int n)
> > > > +{
> > > > +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > +}
> > > > +
> > > >  static int
> > > >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > >  {
> > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > >  }
> > > >
> > > > +static int
> > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > +{
> > > > +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > +}
> > > > +
> > > > +static int
> > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > +{
> > > > +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > +}
> > > > +
> > > >  static unsigned
> > > >  common_ring_get_count(const struct rte_mempool *mp)
> > > >  {
> > > >  	return rte_ring_count(mp->pool_data);
> > > >  }
> > > >
> > > > -
> > > >  static int
> > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > >  {
> > > > -	int rg_flags = 0, ret;
> > > > +	int ret;
> > > >  	char rg_name[RTE_RING_NAMESIZE];
> > > >  	struct rte_ring *r;
> > > >
> > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > >  		return -rte_errno;
> > > >  	}
> > > >
> > > > -	/* ring flags */
> > > > -	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > -		rg_flags |= RING_F_SP_ENQ;
> > > > -	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > -		rg_flags |= RING_F_SC_DEQ;
> > > > -
> > > >  	/*
> > > >  	 * Allocate the ring that will be used to store objects.
> > > >  	 * Ring functions will return appropriate errors if we are
> > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > >  	return 0;
> > > >  }
> > > >
> > > > +static int
> > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > +{
> > > > +	uint32_t rg_flags;
> > > > +
> > > > +	rg_flags = 0;
> > >
> > > Maybe it could go on the same line
> > >
> > > > +
> > > > +	/* ring flags */
> > >
> > > Not sure we need to keep this comment
> > >
> > > > +	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > +		rg_flags |= RING_F_SP_ENQ;
> > > > +	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > +		rg_flags |= RING_F_SC_DEQ;
> > > > +
> > > > +	return ring_alloc(mp, rg_flags);
> > > > +}
> > > > +
> > > > +static int
> > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > +{
> > > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > +		return -EINVAL;
> > >
> > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > if it's not optimal)?
> >
> > These new sync modes (RTS, HTS) are for MT.
> > For SP/SC - there is simply no point to use MT sync modes.
> > I suppose there are few choices:
> > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> >    and create actual ring with ST sync mode for prod/cons.
> > 2. Report an error.
> > 3. Silently ignore these flags.
> >
> > As I can see for  "ring_mp_mc" ops, we doing #1,
> > while for "stack" we are doing #3.
> > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > Any thoughts from your side what preferable behaviour should be?
> 
> The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> ring_mp_mc). 

As I understand, nothing prevents the user from doing:

mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
                 sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);


>I don't think we should look at it when using specific ops.
> 
> So I'll tend to say 3. is the correct thing to do.

Ok, will resend v3 then.

> 
> >
> > >
> > > > +
> > > > +	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> > > > +}
> > > > +
> > > > +static int
> > > > +hts_ring_alloc(struct rte_mempool *mp)
> > > > +{
> > > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > +		return -EINVAL;
> > > > +
> > > > +	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> > > > +}
> > > > +
> > > >  static void
> > > >  common_ring_free(struct rte_mempool *mp)
> > > >  {
> > > > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
> > > >  	.get_count = common_ring_get_count,
> > > >  };
> > > >
> > > > +/* ops for mempool with ring in MT_RTS sync mode */
> > > > +static const struct rte_mempool_ops ops_mt_rts = {
> > > > +	.name = "ring_mt_rts",
> > > > +	.alloc = rts_ring_alloc,
> > > > +	.free = common_ring_free,
> > > > +	.enqueue = rts_ring_mp_enqueue,
> > > > +	.dequeue = rts_ring_mc_dequeue,
> > > > +	.get_count = common_ring_get_count,
> > > > +};
> > > > +
> > > > +/* ops for mempool with ring in MT_HTS sync mode */
> > > > +static const struct rte_mempool_ops ops_mt_hts = {
> > > > +	.name = "ring_mt_hts",
> > > > +	.alloc = hts_ring_alloc,
> > > > +	.free = common_ring_free,
> > > > +	.enqueue = hts_ring_mp_enqueue,
> > > > +	.dequeue = hts_ring_mc_dequeue,
> > > > +	.get_count = common_ring_get_count,
> > > > +};
> > > > +
> > > >  MEMPOOL_REGISTER_OPS(ops_mp_mc);
> > > >  MEMPOOL_REGISTER_OPS(ops_sp_sc);
> > > >  MEMPOOL_REGISTER_OPS(ops_mp_sc);
> > > >  MEMPOOL_REGISTER_OPS(ops_sp_mc);
> > > > +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> > > > +MEMPOOL_REGISTER_OPS(ops_mt_hts);
> >
> > > Not really related to your patch, but I think we need a function to
> > > dump the name of available mempool ops. We could even add a description.
> > > The problem we have is that a user does not know on which criteria is
> > > should use a driver or another (except for platform drivers).
> >
> > Agree, it will be usefull.
> > Though it probably subject for a separate patch.
> >


* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-07-10 15:15         ` Ananyev, Konstantin
@ 2020-07-10 15:20           ` Ananyev, Konstantin
  2020-07-13 13:30             ` Olivier Matz
  0 siblings, 1 reply; 25+ messages in thread
From: Ananyev, Konstantin @ 2020-07-10 15:20 UTC (permalink / raw)
  To: Olivier Matz; +Cc: dev, arybchenko, jielong.zjl, Eads, Gage



> 
> Hi Olivier,
> 
> > Hi Konstantin,
> >
> > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > Hi Olivier,
> > >
> > > > Hi Konstantin,
> > > >
> > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > v2:
> > > > >  - update Release Notes (as per comments)
> > > > >
> > > > > Two new sync modes were introduced into rte_ring:
> > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > This change provides user with ability to select these
> > > > > modes for ring based mempool via mempool ops API.
> > > > >
> > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > ---
> > > > >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> > > > >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > >  2 files changed, 94 insertions(+), 9 deletions(-)
> > > > >
> > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > @@ -84,6 +84,12 @@ New Features
> > > > >    * Dump ``rte_flow`` memory consumption.
> > > > >    * Measure packet per second forwarding.
> > > > >
> > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > +
> > > > > +  Added ability to select new ring synchronisation modes:
> > > > > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > +  via mempool ops API.
> > > > > +
> > > > >
> > > > >  Removed Items
> > > > >  -------------
> > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > index bc123fc52..15ec7dee7 100644
> > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > >  }
> > > > >
> > > > > +static int
> > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > +	unsigned int n)
> > > > > +{
> > > > > +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > +}
> > > > > +
> > > > > +static int
> > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > +	unsigned int n)
> > > > > +{
> > > > > +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > +}
> > > > > +
> > > > >  static int
> > > > >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > >  {
> > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > >  }
> > > > >
> > > > > +static int
> > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > +{
> > > > > +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > +}
> > > > > +
> > > > > +static int
> > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > +{
> > > > > +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > +}
> > > > > +
> > > > >  static unsigned
> > > > >  common_ring_get_count(const struct rte_mempool *mp)
> > > > >  {
> > > > >  	return rte_ring_count(mp->pool_data);
> > > > >  }
> > > > >
> > > > > -
> > > > >  static int
> > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > >  {
> > > > > -	int rg_flags = 0, ret;
> > > > > +	int ret;
> > > > >  	char rg_name[RTE_RING_NAMESIZE];
> > > > >  	struct rte_ring *r;
> > > > >
> > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > >  		return -rte_errno;
> > > > >  	}
> > > > >
> > > > > -	/* ring flags */
> > > > > -	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > -		rg_flags |= RING_F_SP_ENQ;
> > > > > -	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > -		rg_flags |= RING_F_SC_DEQ;
> > > > > -
> > > > >  	/*
> > > > >  	 * Allocate the ring that will be used to store objects.
> > > > >  	 * Ring functions will return appropriate errors if we are
> > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > >  	return 0;
> > > > >  }
> > > > >
> > > > > +static int
> > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > +{
> > > > > +	uint32_t rg_flags;
> > > > > +
> > > > > +	rg_flags = 0;
> > > >
> > > > Maybe it could go on the same line
> > > >
> > > > > +
> > > > > +	/* ring flags */
> > > >
> > > > Not sure we need to keep this comment
> > > >
> > > > > +	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > +		rg_flags |= RING_F_SP_ENQ;
> > > > > +	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > +		rg_flags |= RING_F_SC_DEQ;
> > > > > +
> > > > > +	return ring_alloc(mp, rg_flags);
> > > > > +}
> > > > > +
> > > > > +static int
> > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > +{
> > > > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > +		return -EINVAL;
> > > >
> > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > if it's not optimal)?
> > >
> > > These new sync modes (RTS, HTS) are for MT.
> > > For SP/SC - there is simply no point to use MT sync modes.
> > > I suppose there are few choices:
> > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > >    and create actual ring with ST sync mode for prod/cons.
> > > 2. Report an error.
> > > 3. Silently ignore these flags.
> > >
> > > As I can see for  "ring_mp_mc" ops, we doing #1,
> > > while for "stack" we are doing #3.
> > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > Any thoughts from your side what preferable behaviour should be?
> >
> > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > ring_mp_mc).
> 
> As I understand, nothing prevents user from doing:
> 
> mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
>                  sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);

Apologies, I hit send accidentally.
I meant the user can do:

mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);

And in that case, he'll get an SP/SC ring underneath.

> 
> 
> >I don't think we should look at it when using specific ops.
> >
> > So I'll tend to say 3. is the correct thing to do.
> 
> Ok, will resend v3 then.
> 


* [dpdk-dev] [PATCH v3] mempool/ring: add support for new ring sync modes
  2020-06-29 16:10 ` [dpdk-dev] [PATCH v2] " Konstantin Ananyev
  2020-07-09 16:18   ` Olivier Matz
@ 2020-07-10 16:21   ` Konstantin Ananyev
  2020-07-10 22:44     ` Thomas Monjalon
  2020-07-13 15:50     ` [dpdk-dev] [PATCH v4 0/2] " Konstantin Ananyev
  1 sibling, 2 replies; 25+ messages in thread
From: Konstantin Ananyev @ 2020-07-10 16:21 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, arybchenko, jielong.zjl, gage.eads, Konstantin Ananyev

Two new sync modes were introduced into rte_ring:
relaxed tail sync (RTS) and head/tail sync (HTS).
This change provides the user with the ability to select these
modes for a ring-based mempool via the mempool ops API.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Gage Eads <gage.eads@intel.com>
---

v3: silently ignore F_SP_PUT/F_SC_GET (Olivier comment)

v2: update Release Notes (as per comments)

 doc/guides/rel_notes/release_20_08.rst  |  6 ++
 drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++---
 2 files changed, 85 insertions(+), 9 deletions(-)

diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index 17d70e7c1..fff819fca 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -171,6 +171,12 @@ New Features
   See the :doc:`../sample_app_ug/l2_forward_real_virtual` for more
   details of this parameter usage.
 
+* **Added support for new sync modes into mempool ring driver.**
+
+  Added ability to select new ring synchronisation modes:
+  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
+  via mempool ops API.
+
 
 Removed Items
 -------------
diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
index bc123fc52..b1f09ff28 100644
--- a/drivers/mempool/ring/rte_mempool_ring.c
+++ b/drivers/mempool/ring/rte_mempool_ring.c
@@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static int
 common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
@@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static unsigned
 common_ring_get_count(const struct rte_mempool *mp)
 {
 	return rte_ring_count(mp->pool_data);
 }
 
-
 static int
-common_ring_alloc(struct rte_mempool *mp)
+ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
 {
-	int rg_flags = 0, ret;
+	int ret;
 	char rg_name[RTE_RING_NAMESIZE];
 	struct rte_ring *r;
 
@@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
 		return -rte_errno;
 	}
 
-	/* ring flags */
-	if (mp->flags & MEMPOOL_F_SP_PUT)
-		rg_flags |= RING_F_SP_ENQ;
-	if (mp->flags & MEMPOOL_F_SC_GET)
-		rg_flags |= RING_F_SC_DEQ;
-
 	/*
 	 * Allocate the ring that will be used to store objects.
 	 * Ring functions will return appropriate errors if we are
@@ -82,6 +105,31 @@ common_ring_alloc(struct rte_mempool *mp)
 	return 0;
 }
 
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+	uint32_t rg_flags = 0;
+
+	if (mp->flags & MEMPOOL_F_SP_PUT)
+		rg_flags |= RING_F_SP_ENQ;
+	if (mp->flags & MEMPOOL_F_SC_GET)
+		rg_flags |= RING_F_SC_DEQ;
+
+	return ring_alloc(mp, rg_flags);
+}
+
+static int
+rts_ring_alloc(struct rte_mempool *mp)
+{
+	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
+}
+
+static int
+hts_ring_alloc(struct rte_mempool *mp)
+{
+	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
+}
+
 static void
 common_ring_free(struct rte_mempool *mp)
 {
@@ -130,7 +178,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
 	.get_count = common_ring_get_count,
 };
 
+/* ops for mempool with ring in MT_RTS sync mode */
+static const struct rte_mempool_ops ops_mt_rts = {
+	.name = "ring_mt_rts",
+	.alloc = rts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = rts_ring_mp_enqueue,
+	.dequeue = rts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
+/* ops for mempool with ring in MT_HTS sync mode */
+static const struct rte_mempool_ops ops_mt_hts = {
+	.name = "ring_mt_hts",
+	.alloc = hts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = hts_ring_mp_enqueue,
+	.dequeue = hts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
 MEMPOOL_REGISTER_OPS(ops_mp_mc);
 MEMPOOL_REGISTER_OPS(ops_sp_sc);
 MEMPOOL_REGISTER_OPS(ops_mp_sc);
 MEMPOOL_REGISTER_OPS(ops_sp_mc);
+MEMPOOL_REGISTER_OPS(ops_mt_rts);
+MEMPOOL_REGISTER_OPS(ops_mt_hts);
-- 
2.17.1



* Re: [dpdk-dev] [PATCH v3] mempool/ring: add support for new ring sync modes
  2020-07-10 16:21   ` [dpdk-dev] [PATCH v3] " Konstantin Ananyev
@ 2020-07-10 22:44     ` Thomas Monjalon
  2020-07-13 12:58       ` Ananyev, Konstantin
  2020-07-13 15:50     ` [dpdk-dev] [PATCH v4 0/2] " Konstantin Ananyev
  1 sibling, 1 reply; 25+ messages in thread
From: Thomas Monjalon @ 2020-07-10 22:44 UTC (permalink / raw)
  To: Konstantin Ananyev
  Cc: dev, olivier.matz, arybchenko, jielong.zjl, gage.eads,
	david.marchand, ferruh.yigit

10/07/2020 18:21, Konstantin Ananyev:
> Two new sync modes were introduced into rte_ring:
> relaxed tail sync (RTS) and head/tail sync (HTS).
> This change provides user with ability to select these
> modes for ring based mempool via mempool ops API.

No, the user cannot use these modes because there is no documentation,
except in the release notes.

Sorry, I don't understand why I have to make such an obvious comment
after so many years.
And more importantly, why has nobody else asked for documentation?

I think I will add "UNMAINTAINED" for doc in the MAINTAINERS file.
If nobody is interested in doc maintenance, we could "git rm doc/".

Sorry in advance for hijacking this thread.

[...]
> --- a/doc/guides/rel_notes/release_20_08.rst
> +++ b/doc/guides/rel_notes/release_20_08.rst
> @@ -171,6 +171,12 @@ New Features
>    See the :doc:`../sample_app_ug/l2_forward_real_virtual` for more
>    details of this parameter usage.
>  
> +* **Added support for new sync modes into mempool ring driver.**
> +
> +  Added ability to select new ring synchronisation modes:
> +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> +  via mempool ops API.

Please check the comment at the top of the new features section about the ordering.
Mempool features should be mentioned earlier in the list.





* Re: [dpdk-dev] [PATCH v3] mempool/ring: add support for new ring sync modes
  2020-07-10 22:44     ` Thomas Monjalon
@ 2020-07-13 12:58       ` Ananyev, Konstantin
  2020-07-13 13:57         ` Thomas Monjalon
  0 siblings, 1 reply; 25+ messages in thread
From: Ananyev, Konstantin @ 2020-07-13 12:58 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: dev, olivier.matz, arybchenko, jielong.zjl, Eads, Gage,
	david.marchand, Yigit, Ferruh

> 10/07/2020 18:21, Konstantin Ananyev:
> > Two new sync modes were introduced into rte_ring:
> > relaxed tail sync (RTS) and head/tail sync (HTS).
> > This change provides user with ability to select these
> > modes for ring based mempool via mempool ops API.
> 
> No, the user cannot use these modes because there is no doc,
> except in release notes.

AFAIK, there is no section for rte_ring and other SW-based
mempool drivers:
https://doc.dpdk.org/guides/mempool/index.html
Are you asking me to add such a section as part of this patch?
 
> 
> Sorry I don't understand why I have to do such obvious comment
> after so many years.
> And more importantly, why nobody else asked for doc?
> 
> I think I will add "UNMAINTAINED" for doc in MAINTAINERS file.
> If nobody is interested in doc maintenance, we could "git rm doc/".
> 
> Sorry in advance for hijacking this thread.
> 
> [...]
> > --- a/doc/guides/rel_notes/release_20_08.rst
> > +++ b/doc/guides/rel_notes/release_20_08.rst
> > @@ -171,6 +171,12 @@ New Features
> >    See the :doc:`../sample_app_ug/l2_forward_real_virtual` for more
> >    details of this parameter usage.
> >
> > +* **Added support for new sync modes into mempool ring driver.**
> > +
> > +  Added ability to select new ring synchronisation modes:
> > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > +  via mempool ops API.
> 
> Please check the comment on top of new features about the ordering.
> Mempool features should be mentioned earlier in the list.
> 
> 


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-07-10 15:20           ` Ananyev, Konstantin
@ 2020-07-13 13:30             ` Olivier Matz
  2020-07-13 14:46               ` Ananyev, Konstantin
  0 siblings, 1 reply; 25+ messages in thread
From: Olivier Matz @ 2020-07-13 13:30 UTC (permalink / raw)
  To: Ananyev, Konstantin; +Cc: dev, arybchenko, jielong.zjl, Eads, Gage

Hi Konstantin,

On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> 
> 
> > 
> > Hi Olivier,
> > 
> > > Hi Konstantin,
> > >
> > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > Hi Olivier,
> > > >
> > > > > Hi Konstantin,
> > > > >
> > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > v2:
> > > > > >  - update Release Notes (as per comments)
> > > > > >
> > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > This change provides user with ability to select these
> > > > > > modes for ring based mempool via mempool ops API.
> > > > > >
> > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > ---
> > > > > >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> > > > > >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > >  2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > >
> > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > >    * Dump ``rte_flow`` memory consumption.
> > > > > >    * Measure packet per second forwarding.
> > > > > >
> > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > +
> > > > > > +  Added ability to select new ring synchronisation modes:
> > > > > > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > +  via mempool ops API.
> > > > > > +
> > > > > >
> > > > > >  Removed Items
> > > > > >  -------------
> > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > >  }
> > > > > >
> > > > > > +static int
> > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > +	unsigned int n)
> > > > > > +{
> > > > > > +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > +}
> > > > > > +
> > > > > > +static int
> > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > +	unsigned int n)
> > > > > > +{
> > > > > > +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > +}
> > > > > > +
> > > > > >  static int
> > > > > >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > >  {
> > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > >  }
> > > > > >
> > > > > > +static int
> > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > +{
> > > > > > +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > +}
> > > > > > +
> > > > > > +static int
> > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > +{
> > > > > > +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > +}
> > > > > > +
> > > > > >  static unsigned
> > > > > >  common_ring_get_count(const struct rte_mempool *mp)
> > > > > >  {
> > > > > >  	return rte_ring_count(mp->pool_data);
> > > > > >  }
> > > > > >
> > > > > > -
> > > > > >  static int
> > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > >  {
> > > > > > -	int rg_flags = 0, ret;
> > > > > > +	int ret;
> > > > > >  	char rg_name[RTE_RING_NAMESIZE];
> > > > > >  	struct rte_ring *r;
> > > > > >
> > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > >  		return -rte_errno;
> > > > > >  	}
> > > > > >
> > > > > > -	/* ring flags */
> > > > > > -	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > -		rg_flags |= RING_F_SP_ENQ;
> > > > > > -	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > -		rg_flags |= RING_F_SC_DEQ;
> > > > > > -
> > > > > >  	/*
> > > > > >  	 * Allocate the ring that will be used to store objects.
> > > > > >  	 * Ring functions will return appropriate errors if we are
> > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > >  	return 0;
> > > > > >  }
> > > > > >
> > > > > > +static int
> > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > +{
> > > > > > +	uint32_t rg_flags;
> > > > > > +
> > > > > > +	rg_flags = 0;
> > > > >
> > > > > Maybe it could go on the same line
> > > > >
> > > > > > +
> > > > > > +	/* ring flags */
> > > > >
> > > > > Not sure we need to keep this comment
> > > > >
> > > > > > +	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > +		rg_flags |= RING_F_SP_ENQ;
> > > > > > +	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > +		rg_flags |= RING_F_SC_DEQ;
> > > > > > +
> > > > > > +	return ring_alloc(mp, rg_flags);
> > > > > > +}
> > > > > > +
> > > > > > +static int
> > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > +{
> > > > > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > +		return -EINVAL;
> > > > >
> > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > if it's not optimal)?
> > > >
> > > > These new sync modes (RTS, HTS) are for MT.
> > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > I suppose there are few choices:
> > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > >    and create actual ring with ST sync mode for prod/cons.
> > > > 2. Report an error.
> > > > 3. Silently ignore these flags.
> > > >
> > > > As I can see for  "ring_mp_mc" ops, we doing #1,
> > > > while for "stack" we are doing #3.
> > > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > > Any thoughts from your side what preferable behaviour should be?
> > >
> > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > ring_mp_mc).
> > 
> > As I understand, nothing prevents user from doing:
> > 
> > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> >                  sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> 
> Apologies, hit send accidently.
> I meant user can do:
> 
> mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> 
> An in that case, he'll get SP/SC ring underneath.

It looks like it's not the case. Since commit 449c49b93a6b ("mempool: support
handler operations"), the flags SP_PUT/SC_GET are converted into a call
to rte_mempool_set_ops_byname() in rte_mempool_create() only.

In rte_mempool_create_empty(), these flags are ignored. It is expected
that the user calls rte_mempool_set_ops_byname() by itself.

I don't think it is a good behavior:

1/ The documentation of rte_mempool_create_empty() does not say that the
   flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
   sets the default ops like rte_mempool_create().

2/ If rte_mempool_set_ops_byname() is not called after
   rte_mempool_create_empty() (and it looks like this happens in dpdk's code),
   the default ops are the ones registered at index 0. This depends on
   the link order.

So I propose to move the following code into
rte_mempool_create_empty().

	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
	else if (flags & MEMPOOL_F_SP_PUT)
		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
	else if (flags & MEMPOOL_F_SC_GET)
		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
	else
		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);

What do you think?



> 
> > 
> > 
> > >I don't think we should look at it when using specific ops.
> > >
> > > So I'll tend to say 3. is the correct thing to do.
> > 
> > Ok, will resend v3 then.
> > 

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v3] mempool/ring: add support for new ring sync modes
  2020-07-13 12:58       ` Ananyev, Konstantin
@ 2020-07-13 13:57         ` Thomas Monjalon
  0 siblings, 0 replies; 25+ messages in thread
From: Thomas Monjalon @ 2020-07-13 13:57 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: dev, olivier.matz, arybchenko, jielong.zjl, Eads, Gage,
	david.marchand, Yigit, Ferruh

13/07/2020 14:58, Ananyev, Konstantin:
> > 10/07/2020 18:21, Konstantin Ananyev:
> > > Two new sync modes were introduced into rte_ring:
> > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > This change provides user with ability to select these
> > > modes for ring based mempool via mempool ops API.
> > 
> > No, the user cannot use these modes because there is no doc,
> > except in release notes.
> 
> AFAIK, there is no section for rte_ring and others SW based 
> mempool drivers:
> https://doc.dpdk.org/guides/mempool/index.html
> Are you asking me to add such section as part of this patch?

I'm asking you to provide some documentation to the users, yes.

It can be in the doc you pointed to above, with a link from the prog guide:
https://doc.dpdk.org/guides/prog_guide/mempool_lib.html#mempool-handlers

And because it solves issues in some use cases,
it needs to be called out why and when to use these options.



^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-07-13 13:30             ` Olivier Matz
@ 2020-07-13 14:46               ` Ananyev, Konstantin
  2020-07-13 15:00                 ` Olivier Matz
  0 siblings, 1 reply; 25+ messages in thread
From: Ananyev, Konstantin @ 2020-07-13 14:46 UTC (permalink / raw)
  To: Olivier Matz; +Cc: dev, arybchenko, jielong.zjl, Eads, Gage

Hi Olivier,

> Hi Konstantin,
> 
> On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> >
> >
> > >
> > > Hi Olivier,
> > >
> > > > Hi Konstantin,
> > > >
> > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > Hi Olivier,
> > > > >
> > > > > > Hi Konstantin,
> > > > > >
> > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > v2:
> > > > > > >  - update Release Notes (as per comments)
> > > > > > >
> > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > This change provides user with ability to select these
> > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > >
> > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > > ---
> > > > > > >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> > > > > > >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > >  2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > >
> > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > >    * Dump ``rte_flow`` memory consumption.
> > > > > > >    * Measure packet per second forwarding.
> > > > > > >
> > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > +
> > > > > > > +  Added ability to select new ring synchronisation modes:
> > > > > > > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > +  via mempool ops API.
> > > > > > > +
> > > > > > >
> > > > > > >  Removed Items
> > > > > > >  -------------
> > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > >  }
> > > > > > >
> > > > > > > +static int
> > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > +	unsigned int n)
> > > > > > > +{
> > > > > > > +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > +}
> > > > > > > +
> > > > > > > +static int
> > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > +	unsigned int n)
> > > > > > > +{
> > > > > > > +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > +}
> > > > > > > +
> > > > > > >  static int
> > > > > > >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > >  {
> > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > >  }
> > > > > > >
> > > > > > > +static int
> > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > +{
> > > > > > > +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > +}
> > > > > > > +
> > > > > > > +static int
> > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > +{
> > > > > > > +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > +}
> > > > > > > +
> > > > > > >  static unsigned
> > > > > > >  common_ring_get_count(const struct rte_mempool *mp)
> > > > > > >  {
> > > > > > >  	return rte_ring_count(mp->pool_data);
> > > > > > >  }
> > > > > > >
> > > > > > > -
> > > > > > >  static int
> > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > >  {
> > > > > > > -	int rg_flags = 0, ret;
> > > > > > > +	int ret;
> > > > > > >  	char rg_name[RTE_RING_NAMESIZE];
> > > > > > >  	struct rte_ring *r;
> > > > > > >
> > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > >  		return -rte_errno;
> > > > > > >  	}
> > > > > > >
> > > > > > > -	/* ring flags */
> > > > > > > -	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > -		rg_flags |= RING_F_SP_ENQ;
> > > > > > > -	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > -		rg_flags |= RING_F_SC_DEQ;
> > > > > > > -
> > > > > > >  	/*
> > > > > > >  	 * Allocate the ring that will be used to store objects.
> > > > > > >  	 * Ring functions will return appropriate errors if we are
> > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > >  	return 0;
> > > > > > >  }
> > > > > > >
> > > > > > > +static int
> > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > +{
> > > > > > > +	uint32_t rg_flags;
> > > > > > > +
> > > > > > > +	rg_flags = 0;
> > > > > >
> > > > > > Maybe it could go on the same line
> > > > > >
> > > > > > > +
> > > > > > > +	/* ring flags */
> > > > > >
> > > > > > Not sure we need to keep this comment
> > > > > >
> > > > > > > +	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > +		rg_flags |= RING_F_SP_ENQ;
> > > > > > > +	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > +		rg_flags |= RING_F_SC_DEQ;
> > > > > > > +
> > > > > > > +	return ring_alloc(mp, rg_flags);
> > > > > > > +}
> > > > > > > +
> > > > > > > +static int
> > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > +{
> > > > > > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > +		return -EINVAL;
> > > > > >
> > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > > if it's not optimal)?
> > > > >
> > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > > I suppose there are few choices:
> > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > >    and create actual ring with ST sync mode for prod/cons.
> > > > > 2. Report an error.
> > > > > 3. Silently ignore these flags.
> > > > >
> > > > > As I can see for  "ring_mp_mc" ops, we doing #1,
> > > > > while for "stack" we are doing #3.
> > > > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > > > Any thoughts from your side what preferable behaviour should be?
> > > >
> > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > > ring_mp_mc).
> > >
> > > As I understand, nothing prevents user from doing:
> > >
> > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > >                  sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> >
> > Apologies, hit send accidently.
> > I meant user can do:
> >
> > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> >
> > An in that case, he'll get SP/SC ring underneath.
> 
> It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
> handler operations"), the flags SP_PUT/SC_GET are converted into a call
> to rte_mempool_set_ops_byname() in rte_mempool_create() only.
> 
> In rte_mempool_create_empty(), these flags are ignored. It is expected
> that the user calls rte_mempool_set_ops_byname() by itself.

As I understand the code - not exactly.
rte_mempool_create_empty() doesn't take any specific action based on the 'flags' value,
but it does store its value inside mp->flags.
Later, when mempool_ops_alloc_once() is called, these flags will be used by
common_ring_alloc() and might override the ring behaviour selected by ops.
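
To illustrate with a minimal user-level sketch (pool name and sizes are
arbitrary, error handling trimmed; assumes rte_eal_init() has already run):

#include <rte_memory.h>
#include <rte_mempool.h>

static struct rte_mempool *
create_example_pool(void)
{
	struct rte_mempool *mp;

	/* The SP/SC flags are only recorded in mp->flags at this point. */
	mp = rte_mempool_create_empty("example_pool", 4096, 64, 32, 0,
			SOCKET_ID_ANY,
			MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
	if (mp == NULL)
		return NULL;

	/* The user explicitly asks for an MT ring... */
	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	/*
	 * ...but at populate time common_ring_alloc() reads mp->flags and
	 * passes RING_F_SP_ENQ | RING_F_SC_DEQ to the ring, so the flags
	 * silently override the sync mode selected via ops.
	 */
	if (rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}
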

> 
> I don't think it is a good behavior:
> 
> 1/ The documentation of rte_mempool_create_empty() does not say that the
>    flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
>    sets the default ops like rte_mempool_create().
> 
> 2/ If rte_mempool_set_ops_byname() is not called after
>    rte_mempool_create_empty() (and it looks it happens in dpdk's code),
>    the default ops are the ones registered at index 0. This depends on
>    the link order.
> 
> So I propose to move the following code in
> rte_mempool_create_empty().
> 
> 	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> 	else if (flags & MEMPOOL_F_SP_PUT)
> 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> 	else if (flags & MEMPOOL_F_SC_GET)
> 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> 	else
> 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> 
> What do you think?

I think it will be a good thing - as in that case we'll always have
"ring_mp_mc" selected as the default one.
As another thought, it probably would be good to deprecate and later remove
MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
These days the user can select this behaviour via mempool ops, and such dualism
just makes things more error-prone and harder to maintain.
Especially as we don't have a clear policy on what should take the higher priority
for sync mode selection: mempool ops or flags.


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-07-13 14:46               ` Ananyev, Konstantin
@ 2020-07-13 15:00                 ` Olivier Matz
  2020-07-13 16:29                   ` Ananyev, Konstantin
  0 siblings, 1 reply; 25+ messages in thread
From: Olivier Matz @ 2020-07-13 15:00 UTC (permalink / raw)
  To: Ananyev, Konstantin; +Cc: dev, arybchenko, jielong.zjl, Eads, Gage

On Mon, Jul 13, 2020 at 02:46:35PM +0000, Ananyev, Konstantin wrote:
> Hi Olivier,
> 
> > Hi Konstantin,
> > 
> > On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> > >
> > >
> > > >
> > > > Hi Olivier,
> > > >
> > > > > Hi Konstantin,
> > > > >
> > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > > Hi Olivier,
> > > > > >
> > > > > > > Hi Konstantin,
> > > > > > >
> > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > > v2:
> > > > > > > >  - update Release Notes (as per comments)
> > > > > > > >
> > > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > > This change provides user with ability to select these
> > > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > > >
> > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > > > ---
> > > > > > > >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> > > > > > > >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > > >  2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > > >
> > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > > >    * Dump ``rte_flow`` memory consumption.
> > > > > > > >    * Measure packet per second forwarding.
> > > > > > > >
> > > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > > +
> > > > > > > > +  Added ability to select new ring synchronisation modes:
> > > > > > > > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > > +  via mempool ops API.
> > > > > > > > +
> > > > > > > >
> > > > > > > >  Removed Items
> > > > > > > >  -------------
> > > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > >  }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > +	unsigned int n)
> > > > > > > > +{
> > > > > > > > +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > +	unsigned int n)
> > > > > > > > +{
> > > > > > > > +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > >  static int
> > > > > > > >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > >  {
> > > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > >  }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > +{
> > > > > > > > +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > +{
> > > > > > > > +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > +}
> > > > > > > > +
> > > > > > > >  static unsigned
> > > > > > > >  common_ring_get_count(const struct rte_mempool *mp)
> > > > > > > >  {
> > > > > > > >  	return rte_ring_count(mp->pool_data);
> > > > > > > >  }
> > > > > > > >
> > > > > > > > -
> > > > > > > >  static int
> > > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > > >  {
> > > > > > > > -	int rg_flags = 0, ret;
> > > > > > > > +	int ret;
> > > > > > > >  	char rg_name[RTE_RING_NAMESIZE];
> > > > > > > >  	struct rte_ring *r;
> > > > > > > >
> > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > >  		return -rte_errno;
> > > > > > > >  	}
> > > > > > > >
> > > > > > > > -	/* ring flags */
> > > > > > > > -	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > -		rg_flags |= RING_F_SP_ENQ;
> > > > > > > > -	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > -		rg_flags |= RING_F_SC_DEQ;
> > > > > > > > -
> > > > > > > >  	/*
> > > > > > > >  	 * Allocate the ring that will be used to store objects.
> > > > > > > >  	 * Ring functions will return appropriate errors if we are
> > > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > >  	return 0;
> > > > > > > >  }
> > > > > > > >
> > > > > > > > +static int
> > > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +{
> > > > > > > > +	uint32_t rg_flags;
> > > > > > > > +
> > > > > > > > +	rg_flags = 0;
> > > > > > >
> > > > > > > Maybe it could go on the same line
> > > > > > >
> > > > > > > > +
> > > > > > > > +	/* ring flags */
> > > > > > >
> > > > > > > Not sure we need to keep this comment
> > > > > > >
> > > > > > > > +	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > +		rg_flags |= RING_F_SP_ENQ;
> > > > > > > > +	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > +		rg_flags |= RING_F_SC_DEQ;
> > > > > > > > +
> > > > > > > > +	return ring_alloc(mp, rg_flags);
> > > > > > > > +}
> > > > > > > > +
> > > > > > > > +static int
> > > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > > +{
> > > > > > > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > > +		return -EINVAL;
> > > > > > >
> > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > > > if it's not optimal)?
> > > > > >
> > > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > > > I suppose there are few choices:
> > > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > > >    and create actual ring with ST sync mode for prod/cons.
> > > > > > 2. Report an error.
> > > > > > 3. Silently ignore these flags.
> > > > > >
> > > > > > As I can see for  "ring_mp_mc" ops, we doing #1,
> > > > > > while for "stack" we are doing #3.
> > > > > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > > > > Any thoughts from your side what preferable behaviour should be?
> > > > >
> > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > > > ring_mp_mc).
> > > >
> > > > As I understand, nothing prevents user from doing:
> > > >
> > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > > >                  sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> > >
> > > Apologies, hit send accidently.
> > > I meant user can do:
> > >
> > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > >
> > > An in that case, he'll get SP/SC ring underneath.
> > 
> > It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
> > handler operations"), the flags SP_PUT/SC_GET are converted into a call
> > to rte_mempool_set_ops_byname() in rte_mempool_create() only.
> > 
> > In rte_mempool_create_empty(), these flags are ignored. It is expected
> > that the user calls rte_mempool_set_ops_byname() by itself.
> 
> As I understand the code - not exactly.
> rte_mempool_create_empty() doesn't make any specific actions based on 'flags' value,
> but it does store it's value inside mp->flags.
> Later, when mempool_ops_alloc_once() is called these flags will be used by
> common_ring_alloc() and might override selected by ops ring behaviour.
> 
> > 
> > I don't think it is a good behavior:
> > 
> > 1/ The documentation of rte_mempool_create_empty() does not say that the
> >    flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
> >    sets the default ops like rte_mempool_create().
> > 
> > 2/ If rte_mempool_set_ops_byname() is not called after
> >    rte_mempool_create_empty() (and it looks it happens in dpdk's code),
> >    the default ops are the ones registered at index 0. This depends on
> >    the link order.
> > 
> > So I propose to move the following code in
> > rte_mempool_create_empty().
> > 
> > 	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> > 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> > 	else if (flags & MEMPOOL_F_SP_PUT)
> > 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> > 	else if (flags & MEMPOOL_F_SC_GET)
> > 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> > 	else
> > 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > 
> > What do you think?
> 
> I think it will be a good thing - as in that case we'll always have
> "ring_mp_mc" selected as default one.
> As another thought, it porbably would be good to deprecate and later remove
> MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
> These days user can select this behaviour via mempool ops and such dualism
> just makes things more error-prone and harder to maintain.
> Especially as we don't have clear policy what should be the higher priority
> for sync mode selection: mempool ops or flags. 
> 

I'll tend to agree, however it would mean deprecating rte_mempool_create()
too, because we wouldn't be able to set ops with it. Or we would have to
add a 12th (!) argument to the function, to set the ops name.

I don't like having that many arguments to this function, but it seems
it is widely used, probably because it is just one function call (vs
create_empty + set_ops + populate). So adding an "ops_name" argument is
maybe the right thing to do, given we can keep ABI compat.
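
Purely as a hypothetical sketch of where such a 12th argument would go
(this prototype does not exist in DPDK; it is only an illustration of the
trade-off being discussed):

/* Hypothetical prototype only -- not an existing DPDK function. */
struct rte_mempool *
rte_mempool_create_with_ops(const char *name, unsigned int n,
		unsigned int elt_size, unsigned int cache_size,
		unsigned int private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned int flags,
		const char *ops_name);	/* the extra 12th argument */
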


^ permalink raw reply	[flat|nested] 25+ messages in thread

* [dpdk-dev] [PATCH v4 0/2] mempool/ring: add support for new ring sync modes
  2020-07-10 16:21   ` [dpdk-dev] [PATCH v3] " Konstantin Ananyev
  2020-07-10 22:44     ` Thomas Monjalon
@ 2020-07-13 15:50     ` Konstantin Ananyev
  2020-07-13 15:50       ` [dpdk-dev] [PATCH v4 1/2] doc: add ring based mempool guide Konstantin Ananyev
                         ` (2 more replies)
  1 sibling, 3 replies; 25+ messages in thread
From: Konstantin Ananyev @ 2020-07-13 15:50 UTC (permalink / raw)
  To: dev
  Cc: olivier.matz, arybchenko, jielong.zjl, gage.eads, thomas,
	Konstantin Ananyev

v4:
  add new section into mempool drivers guide (Thomas Monjalon comments)

v3:
  silently ignore F_SP_PUT/F_SC_GET (Olivier Matz comments)

v2:
  update Release Notes (Gage Eads comments)

Konstantin Ananyev (2):
  doc: add ring based mempool guide
  mempool/ring: add support for new ring sync modes

 doc/guides/mempool/index.rst            |  1 +
 doc/guides/mempool/ring.rst             | 44 +++++++++++++
 doc/guides/rel_notes/release_20_08.rst  |  6 ++
 drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++---
 4 files changed, 130 insertions(+), 9 deletions(-)
 create mode 100644 doc/guides/mempool/ring.rst

-- 
2.17.1


^ permalink raw reply	[flat|nested] 25+ messages in thread

* [dpdk-dev] [PATCH v4 1/2] doc: add ring based mempool guide
  2020-07-13 15:50     ` [dpdk-dev] [PATCH v4 0/2] " Konstantin Ananyev
@ 2020-07-13 15:50       ` Konstantin Ananyev
  2020-07-13 15:50       ` [dpdk-dev] [PATCH v4 2/2] mempool/ring: add support for new ring sync modes Konstantin Ananyev
  2020-07-15 14:58       ` [dpdk-dev] [PATCH v5 0/2] " Konstantin Ananyev
  2 siblings, 0 replies; 25+ messages in thread
From: Konstantin Ananyev @ 2020-07-13 15:50 UTC (permalink / raw)
  To: dev
  Cc: olivier.matz, arybchenko, jielong.zjl, gage.eads, thomas,
	Konstantin Ananyev

Add documentation for rte_ring mempool driver.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 doc/guides/mempool/index.rst |  1 +
 doc/guides/mempool/ring.rst  | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)
 create mode 100644 doc/guides/mempool/ring.rst

diff --git a/doc/guides/mempool/index.rst b/doc/guides/mempool/index.rst
index 756610264..bbd02fd81 100644
--- a/doc/guides/mempool/index.rst
+++ b/doc/guides/mempool/index.rst
@@ -13,3 +13,4 @@ application through the mempool API.
 
     octeontx
     octeontx2
+    ring
diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst
new file mode 100644
index 000000000..b8659c03f
--- /dev/null
+++ b/doc/guides/mempool/ring.rst
@@ -0,0 +1,34 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2020 Intel Corporation.
+
+Ring Mempool Driver
+==============================
+
+**rte_mempool_ring** is a pure SW mempool driver based on ``rte_ring``
+DPDK library. This is a default mempool driver.
+Following modes of operation are available for ``ring`` mempool driver
+and can be selected via mempool ops API:
+
+- ``ring_mp_mc``
+
+  Underlying **rte_ring** operates in multi-thread producer,
+  multi-thread consumer sync mode.
+
+- ``ring_sp_sc``
+
+  Underlying **rte_ring** operates in single-thread producer,
+  single-thread consumer sync mode.
+
+- ``ring_sp_mc``
+
+  Underlying **rte_ring** operates in single-thread producer,
+  multi-thread consumer sync mode.
+
+- ``ring_mp_sc``
+
+  Underlying **rte_ring** operates in multi-thread producer,
+  single-thread consumer sync mode.
+
+
+For more information about ``rte_ring`` structure, behaviour and available
+synchronisation modes please refer to: :doc:`../prog_guide/ring_lib`.
-- 
2.17.1


^ permalink raw reply	[flat|nested] 25+ messages in thread
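
A minimal usage sketch for the ops names listed in the guide above (pool
name and sizes are only examples; assumes EAL is initialised). The helper
rte_pktmbuf_pool_create_by_ops() accepts the ops name directly:

#include <rte_lcore.h>
#include <rte_mbuf.h>

/* Create an mbuf pool whose underlying ring runs in
 * single-producer / multi-consumer sync mode ("ring_sp_mc").
 */
static struct rte_mempool *
create_sp_mc_mbuf_pool(void)
{
	return rte_pktmbuf_pool_create_by_ops("mbuf_pool_sp_mc",
			8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id(), "ring_sp_mc");
}

The same call with "ring_mp_mc" (the default) or any of the other names
listed above selects the corresponding sync mode.
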

* [dpdk-dev] [PATCH v4 2/2] mempool/ring: add support for new ring sync modes
  2020-07-13 15:50     ` [dpdk-dev] [PATCH v4 0/2] " Konstantin Ananyev
  2020-07-13 15:50       ` [dpdk-dev] [PATCH v4 1/2] doc: add ring based mempool guide Konstantin Ananyev
@ 2020-07-13 15:50       ` Konstantin Ananyev
  2020-07-13 17:37         ` Thomas Monjalon
  2020-07-15 14:58       ` [dpdk-dev] [PATCH v5 0/2] " Konstantin Ananyev
  2 siblings, 1 reply; 25+ messages in thread
From: Konstantin Ananyev @ 2020-07-13 15:50 UTC (permalink / raw)
  To: dev
  Cc: olivier.matz, arybchenko, jielong.zjl, gage.eads, thomas,
	Konstantin Ananyev

Two new sync modes were introduced into rte_ring:
relaxed tail sync (RTS) and head/tail sync (HTS).
This change provides user with ability to select these
modes for ring based mempool via mempool ops API.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Gage Eads <gage.eads@intel.com>
---
 doc/guides/mempool/ring.rst             | 10 +++
 doc/guides/rel_notes/release_20_08.rst  |  6 ++
 drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++---
 3 files changed, 95 insertions(+), 9 deletions(-)

diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst
index b8659c03f..9e2824070 100644
--- a/doc/guides/mempool/ring.rst
+++ b/doc/guides/mempool/ring.rst
@@ -29,6 +29,16 @@ and can be selected via mempool ops API:
   Underlying **rte_ring** operates in multi-thread producer,
   single-thread consumer sync mode.
 
+- ``ring_mt_rts``
+
+  For underlying **rte_ring** both producer and consumer operate in
+  multi-thread Relaxed Tail Sync (RTS) mode.
+
+- ``ring_mt_hts``
+
+  For underlying **rte_ring** both producer and consumer operate in
+  multi-thread Haad-Tail Sync (HTS) mode.
+
 
 For more information about ``rte_ring`` structure, behaviour and available
 synchronisation modes please refer to: :doc:`../prog_guide/ring_lib`.
diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index 17d70e7c1..db25c6f9c 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -69,6 +69,12 @@ New Features
   barriers. rte_*mb APIs, for ARMv8 platforms, are changed to use DMB
   instruction to reflect this.
 
+* **Added support for new sync modes into mempool ring driver.**
+
+  Added ability to select new ring synchronisation modes:
+  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
+  via mempool ops API.
+
 * **Added the support for vfio-pci new VF token interface.**
 
   From Linux 5.7, vfio-pci supports to bind both SR-IOV PF and the created VFs,
diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
index bc123fc52..b1f09ff28 100644
--- a/drivers/mempool/ring/rte_mempool_ring.c
+++ b/drivers/mempool/ring/rte_mempool_ring.c
@@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static int
 common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
@@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static unsigned
 common_ring_get_count(const struct rte_mempool *mp)
 {
 	return rte_ring_count(mp->pool_data);
 }
 
-
 static int
-common_ring_alloc(struct rte_mempool *mp)
+ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
 {
-	int rg_flags = 0, ret;
+	int ret;
 	char rg_name[RTE_RING_NAMESIZE];
 	struct rte_ring *r;
 
@@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
 		return -rte_errno;
 	}
 
-	/* ring flags */
-	if (mp->flags & MEMPOOL_F_SP_PUT)
-		rg_flags |= RING_F_SP_ENQ;
-	if (mp->flags & MEMPOOL_F_SC_GET)
-		rg_flags |= RING_F_SC_DEQ;
-
 	/*
 	 * Allocate the ring that will be used to store objects.
 	 * Ring functions will return appropriate errors if we are
@@ -82,6 +105,31 @@ common_ring_alloc(struct rte_mempool *mp)
 	return 0;
 }
 
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+	uint32_t rg_flags = 0;
+
+	if (mp->flags & MEMPOOL_F_SP_PUT)
+		rg_flags |= RING_F_SP_ENQ;
+	if (mp->flags & MEMPOOL_F_SC_GET)
+		rg_flags |= RING_F_SC_DEQ;
+
+	return ring_alloc(mp, rg_flags);
+}
+
+static int
+rts_ring_alloc(struct rte_mempool *mp)
+{
+	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
+}
+
+static int
+hts_ring_alloc(struct rte_mempool *mp)
+{
+	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
+}
+
 static void
 common_ring_free(struct rte_mempool *mp)
 {
@@ -130,7 +178,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
 	.get_count = common_ring_get_count,
 };
 
+/* ops for mempool with ring in MT_RTS sync mode */
+static const struct rte_mempool_ops ops_mt_rts = {
+	.name = "ring_mt_rts",
+	.alloc = rts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = rts_ring_mp_enqueue,
+	.dequeue = rts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
+/* ops for mempool with ring in MT_HTS sync mode */
+static const struct rte_mempool_ops ops_mt_hts = {
+	.name = "ring_mt_hts",
+	.alloc = hts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = hts_ring_mp_enqueue,
+	.dequeue = hts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
 MEMPOOL_REGISTER_OPS(ops_mp_mc);
 MEMPOOL_REGISTER_OPS(ops_sp_sc);
 MEMPOOL_REGISTER_OPS(ops_mp_sc);
 MEMPOOL_REGISTER_OPS(ops_sp_mc);
+MEMPOOL_REGISTER_OPS(ops_mt_rts);
+MEMPOOL_REGISTER_OPS(ops_mt_hts);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v2] mempool/ring: add support for new ring sync modes
  2020-07-13 15:00                 ` Olivier Matz
@ 2020-07-13 16:29                   ` Ananyev, Konstantin
  0 siblings, 0 replies; 25+ messages in thread
From: Ananyev, Konstantin @ 2020-07-13 16:29 UTC (permalink / raw)
  To: Olivier Matz; +Cc: dev, arybchenko, jielong.zjl, Eads, Gage


> On Mon, Jul 13, 2020 at 02:46:35PM +0000, Ananyev, Konstantin wrote:
> > Hi Olivier,
> >
> > > Hi Konstantin,
> > >
> > > On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> > > >
> > > >
> > > > >
> > > > > Hi Olivier,
> > > > >
> > > > > > Hi Konstantin,
> > > > > >
> > > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > > > Hi Olivier,
> > > > > > >
> > > > > > > > Hi Konstantin,
> > > > > > > >
> > > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > > > v2:
> > > > > > > > >  - update Release Notes (as per comments)
> > > > > > > > >
> > > > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > > > This change provides user with ability to select these
> > > > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > > > >
> > > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > > > > > > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > > > > > > > ---
> > > > > > > > >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> > > > > > > > >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > > > >  2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > > > >
> > > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > > > >    * Dump ``rte_flow`` memory consumption.
> > > > > > > > >    * Measure packet per second forwarding.
> > > > > > > > >
> > > > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > > > +
> > > > > > > > > +  Added ability to select new ring synchronisation modes:
> > > > > > > > > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > > > +  via mempool ops API.
> > > > > > > > > +
> > > > > > > > >
> > > > > > > > >  Removed Items
> > > > > > > > >  -------------
> > > > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > +	unsigned int n)
> > > > > > > > > +{
> > > > > > > > > +	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > +	unsigned int n)
> > > > > > > > > +{
> > > > > > > > > +	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > >  static int
> > > > > > > > >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > >  {
> > > > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > >  			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > > +{
> > > > > > > > > +	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > > +{
> > > > > > > > > +	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > > > +			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > >  static unsigned
> > > > > > > > >  common_ring_get_count(const struct rte_mempool *mp)
> > > > > > > > >  {
> > > > > > > > >  	return rte_ring_count(mp->pool_data);
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > -
> > > > > > > > >  static int
> > > > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > > > >  {
> > > > > > > > > -	int rg_flags = 0, ret;
> > > > > > > > > +	int ret;
> > > > > > > > >  	char rg_name[RTE_RING_NAMESIZE];
> > > > > > > > >  	struct rte_ring *r;
> > > > > > > > >
> > > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > >  		return -rte_errno;
> > > > > > > > >  	}
> > > > > > > > >
> > > > > > > > > -	/* ring flags */
> > > > > > > > > -	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > > -		rg_flags |= RING_F_SP_ENQ;
> > > > > > > > > -	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > > -		rg_flags |= RING_F_SC_DEQ;
> > > > > > > > > -
> > > > > > > > >  	/*
> > > > > > > > >  	 * Allocate the ring that will be used to store objects.
> > > > > > > > >  	 * Ring functions will return appropriate errors if we are
> > > > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > >  	return 0;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +{
> > > > > > > > > +	uint32_t rg_flags;
> > > > > > > > > +
> > > > > > > > > +	rg_flags = 0;
> > > > > > > >
> > > > > > > > Maybe it could go on the same line
> > > > > > > >
> > > > > > > > > +
> > > > > > > > > +	/* ring flags */
> > > > > > > >
> > > > > > > > Not sure we need to keep this comment
> > > > > > > >
> > > > > > > > > +	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > > +		rg_flags |= RING_F_SP_ENQ;
> > > > > > > > > +	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > > +		rg_flags |= RING_F_SC_DEQ;
> > > > > > > > > +
> > > > > > > > > +	return ring_alloc(mp, rg_flags);
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +{
> > > > > > > > > +	if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > > > +		return -EINVAL;
> > > > > > > >
> > > > > > > > Why do we need this? It is a problem to allow sc/sp in this mode (even
> > > > > > > > if it's not optimal)?
> > > > > > >
> > > > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > > > For SP/SC - there is simply no point to use MT sync modes.
> > > > > > > I suppose there are few choices:
> > > > > > > 1. Make F_SP_PUT/F_SC_GET flags silently override expected ops behaviour
> > > > > > >    and create actual ring with ST sync mode for prod/cons.
> > > > > > > 2. Report an error.
> > > > > > > 3. Silently ignore these flags.
> > > > > > >
> > > > > > > As I can see for  "ring_mp_mc" ops, we doing #1,
> > > > > > > while for "stack" we are doing #3.
> > > > > > > For RTS/HTS I chosoe #2, as it seems cleaner to me.
> > > > > > > Any thoughts from your side what preferable behaviour should be?
> > > > > >
> > > > > > The F_SP_PUT/F_SC_GET are only used in rte_mempool_create() to select
> > > > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc,
> > > > > > ring_mp_mc).
> > > > >
> > > > > As I understand, nothing prevents user from doing:
> > > > >
> > > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > > > >                  sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> > > >
> > > > Apologies, hit send accidently.
> > > > I meant user can do:
> > > >
> > > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > > > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > > >
> > > > An in that case, he'll get SP/SC ring underneath.
> > >
> > > It looks it's not the case. Since commit 449c49b93a6b ("mempool: support
> > > handler operations"), the flags SP_PUT/SC_GET are converted into a call
> > > to rte_mempool_set_ops_byname() in rte_mempool_create() only.
> > >
> > > In rte_mempool_create_empty(), these flags are ignored. It is expected
> > > that the user calls rte_mempool_set_ops_byname() by itself.
> >
> > As I understand the code - not exactly.
> > rte_mempool_create_empty() doesn't make any specific actions based on 'flags' value,
> > but it does store it's value inside mp->flags.
> > Later, when mempool_ops_alloc_once() is called these flags will be used by
> > common_ring_alloc() and might override selected by ops ring behaviour.
> >
> > >
> > > I don't think it is a good behavior:
> > >
> > > 1/ The documentation of rte_mempool_create_empty() does not say that the
> > >    flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
> > >    sets the default ops like rte_mempool_create().
> > >
> > > 2/ If rte_mempool_set_ops_byname() is not called after
> > >    rte_mempool_create_empty() (and it looks it happens in dpdk's code),
> > >    the default ops are the ones registered at index 0. This depends on
> > >    the link order.
> > >
> > > So I propose to move the following code in
> > > rte_mempool_create_empty().
> > >
> > > 	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> > > 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> > > 	else if (flags & MEMPOOL_F_SP_PUT)
> > > 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> > > 	else if (flags & MEMPOOL_F_SC_GET)
> > > 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> > > 	else
> > > 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > >
> > > What do you think?
> >
> > I think it will be a good thing - as in that case we'll always have
> > "ring_mp_mc" selected as default one.
> > As another thought, it porbably would be good to deprecate and later remove
> > MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
> > These days user can select this behaviour via mempool ops and such dualism
> > just makes things more error-prone and harder to maintain.
> > Especially as we don't have clear policy what should be the higher priority
> > for sync mode selection: mempool ops or flags.
> >
> 
> I'll tend to agree, however it would mean deprecate rte_mempool_create()
> too, because we wouldn't be able to set ops with it. Or we would have to
> add a 12th (!) argument to the function, to set the ops name.
> 
> I don't like having that many arguments to this function, but it seems
> it is widely used, probably because it is just one function call (vs
> create_empty + set_ops + populate). So adding a "ops_name" argument is
> maybe the right thing to do, given we can keep abi compat.

My thought was - just keep the rte_mempool_create()
parameter list as it is, and always set ops to "ring_mp_mc" for it.
Users who'd like some other ops would be forced to use
create_empty+set_ops+populate.
That's pretty much the same as what we have right now;
the only difference will be for rings in SP/SC mode.
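
A rough sketch of the two paths from the application side (names and sizes
are illustrative, error handling trimmed):

#include <rte_memory.h>
#include <rte_mempool.h>

/* Default path: rte_mempool_create() keeps its parameter list and,
 * per the proposal above, always selects the "ring_mp_mc" ops.
 */
static struct rte_mempool *
create_default_pool(void)
{
	return rte_mempool_create("pool_default", 4096, 64, 32, 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
}

/* Any other ops, e.g. the new "ring_mt_hts", goes through
 * create_empty + set_ops + populate.
 */
static struct rte_mempool *
create_hts_pool(void)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("pool_hts", 4096, 64, 32, 0,
			SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;
	if (rte_mempool_set_ops_byname(mp, "ring_mt_hts", NULL) != 0 ||
			rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}
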

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v4 2/2] mempool/ring: add support for new ring sync modes
  2020-07-13 15:50       ` [dpdk-dev] [PATCH v4 2/2] mempool/ring: add support for new ring sync modes Konstantin Ananyev
@ 2020-07-13 17:37         ` Thomas Monjalon
  2020-07-14  9:16           ` Ananyev, Konstantin
  0 siblings, 1 reply; 25+ messages in thread
From: Thomas Monjalon @ 2020-07-13 17:37 UTC (permalink / raw)
  To: Konstantin Ananyev
  Cc: dev, olivier.matz, arybchenko, jielong.zjl, gage.eads,
	Konstantin Ananyev

13/07/2020 17:50, Konstantin Ananyev:
> Two new sync modes were introduced into rte_ring:
> relaxed tail sync (RTS) and head/tail sync (HTS).
> This change provides user with ability to select these
> modes for ring based mempool via mempool ops API.
> 
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> Acked-by: Gage Eads <gage.eads@intel.com>
> ---
>  doc/guides/mempool/ring.rst             | 10 +++
>  doc/guides/rel_notes/release_20_08.rst  |  6 ++
>  drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++---
>  3 files changed, 95 insertions(+), 9 deletions(-)
> 
> diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst
> index b8659c03f..9e2824070 100644
> --- a/doc/guides/mempool/ring.rst
> +++ b/doc/guides/mempool/ring.rst
> @@ -29,6 +29,16 @@ and can be selected via mempool ops API:
>    Underlying **rte_ring** operates in multi-thread producer,
>    single-thread consumer sync mode.
>  
> +- ``ring_mt_rts``
> +
> +  For underlying **rte_ring** both producer and consumer operate in
> +  multi-thread Relaxed Tail Sync (RTS) mode.
> +
> +- ``ring_mt_hts``
> +
> +  For underlying **rte_ring** both producer and consumer operate in
> +  multi-thread Haad-Tail Sync (HTS) mode.

Typo Haad -> Head

Please explain what these modes mean and why/when to use them.




^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v4 2/2] mempool/ring: add support for new ring sync modes
  2020-07-13 17:37         ` Thomas Monjalon
@ 2020-07-14  9:16           ` Ananyev, Konstantin
  2020-07-15  9:59             ` Thomas Monjalon
  0 siblings, 1 reply; 25+ messages in thread
From: Ananyev, Konstantin @ 2020-07-14  9:16 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, olivier.matz, arybchenko, jielong.zjl, Eads, Gage

> 
> 13/07/2020 17:50, Konstantin Ananyev:
> > Two new sync modes were introduced into rte_ring:
> > relaxed tail sync (RTS) and head/tail sync (HTS).
> > This change provides user with ability to select these
> > modes for ring based mempool via mempool ops API.
> >
> > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > Acked-by: Gage Eads <gage.eads@intel.com>
> > ---
> >  doc/guides/mempool/ring.rst             | 10 +++
> >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> >  drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++---
> >  3 files changed, 95 insertions(+), 9 deletions(-)
> >
> > diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst
> > index b8659c03f..9e2824070 100644
> > --- a/doc/guides/mempool/ring.rst
> > +++ b/doc/guides/mempool/ring.rst
> > @@ -29,6 +29,16 @@ and can be selected via mempool ops API:
> >    Underlying **rte_ring** operates in multi-thread producer,
> >    single-thread consumer sync mode.
> >
> > +- ``ring_mt_rts``
> > +
> > +  For underlying **rte_ring** both producer and consumer operate in
> > +  multi-thread Relaxed Tail Sync (RTS) mode.
> > +
> > +- ``ring_mt_hts``
> > +
> > +  For underlying **rte_ring** both producer and consumer operate in
> > +  multi-thread Haad-Tail Sync (HTS) mode.
> 
> Typo Haad -> Head

Will fix.

> 
> Please explain what these modes mean and why/when to use them.
> 

We have quite a detailed explanation in the ring library guide:
https://doc.dpdk.org/guides/prog_guide/ring_lib.html
7.6. Producer/consumer synchronization modes
So here I put a link to the ring library guide for those who need more details.
I could copy & paste the same text here too,
but there is probably not much point in such duplication.


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v4 2/2] mempool/ring: add support for new ring sync modes
  2020-07-14  9:16           ` Ananyev, Konstantin
@ 2020-07-15  9:59             ` Thomas Monjalon
  0 siblings, 0 replies; 25+ messages in thread
From: Thomas Monjalon @ 2020-07-15  9:59 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: dev, olivier.matz, arybchenko, jielong.zjl, Eads, Gage

14/07/2020 11:16, Ananyev, Konstantin:
> > 
> > 13/07/2020 17:50, Konstantin Ananyev:
> > > Two new sync modes were introduced into rte_ring:
> > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > This change provides user with ability to select these
> > > modes for ring based mempool via mempool ops API.
> > >
> > > Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> > > Acked-by: Gage Eads <gage.eads@intel.com>
> > > ---
> > > --- a/doc/guides/mempool/ring.rst
> > > +++ b/doc/guides/mempool/ring.rst
> > > @@ -29,6 +29,16 @@ and can be selected via mempool ops API:
> > >    Underlying **rte_ring** operates in multi-thread producer,
> > >    single-thread consumer sync mode.
> > >
> > > +- ``ring_mt_rts``
> > > +
> > > +  For underlying **rte_ring** both producer and consumer operate in
> > > +  multi-thread Relaxed Tail Sync (RTS) mode.
> > > +
> > > +- ``ring_mt_hts``
> > > +
> > > +  For underlying **rte_ring** both producer and consumer operate in
> > > +  multi-thread Haad-Tail Sync (HTS) mode.
> > 
> > Typo Haad -> Head
> 
> Will fix.
> 
> > 
> > Please explain what these modes mean and why/when to use them.
> > 
> 
> We have quite a detailed explanation in the ring library guide:
> https://doc.dpdk.org/guides/prog_guide/ring_lib.html
> 7.6. Producer/consumer synchronization modes
> So here I put a link to the ring library guide for those who need more details.
> I could copy & paste the same text here too,
> but there is probably not much point in such duplication.

Do not duplicate.
Please add a direct link to the section:
https://doc.dpdk.org/guides/prog_guide/ring_lib.html#mp-rts-mc-rts

From a user perspective, I was looking for a recommendation about
which mode is preferred in which case.
Maybe a few words about what an "overcommitted system" is?



^ permalink raw reply	[flat|nested] 25+ messages in thread

* [dpdk-dev] [PATCH v5 0/2] mempool/ring: add support for new ring sync modes
  2020-07-13 15:50     ` [dpdk-dev] [PATCH v4 0/2] " Konstantin Ananyev
  2020-07-13 15:50       ` [dpdk-dev] [PATCH v4 1/2] doc: add ring based mempool guide Konstantin Ananyev
  2020-07-13 15:50       ` [dpdk-dev] [PATCH v4 2/2] mempool/ring: add support for new ring sync modes Konstantin Ananyev
@ 2020-07-15 14:58       ` Konstantin Ananyev
  2020-07-15 14:58         ` [dpdk-dev] [PATCH v5 1/2] doc: add ring based mempool guide Konstantin Ananyev
  2020-07-15 14:58         ` [dpdk-dev] [PATCH v5 2/2] mempool/ring: add support for new ring sync modes Konstantin Ananyev
  2 siblings, 2 replies; 25+ messages in thread
From: Konstantin Ananyev @ 2020-07-15 14:58 UTC (permalink / raw)
  To: dev
  Cc: olivier.matz, arybchenko, jielong.zjl, gage.eads, thomas,
	Konstantin Ananyev

v5:
  few more comments from Thomas regarding the docs

v4:
  add new section into mempool drivers guide (Thomas Monjalon comments)

v3:
  silently ignore F_SP_PUT/F_SC_GET (Olivier Matz comments)

v2:
  update Release Notes (Gage Eads comments)

Konstantin Ananyev (2):
  doc: add ring based mempool guide
  mempool/ring: add support for new ring sync modes

 doc/guides/mempool/index.rst            |  1 +
 doc/guides/mempool/ring.rst             | 52 +++++++++++++++
 doc/guides/prog_guide/ring_lib.rst      |  8 +++
 doc/guides/rel_notes/release_20_08.rst  |  6 ++
 drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++---
 5 files changed, 146 insertions(+), 9 deletions(-)
 create mode 100644 doc/guides/mempool/ring.rst

-- 
2.17.1


^ permalink raw reply	[flat|nested] 25+ messages in thread

* [dpdk-dev] [PATCH v5 1/2] doc: add ring based mempool guide
  2020-07-15 14:58       ` [dpdk-dev] [PATCH v5 0/2] " Konstantin Ananyev
@ 2020-07-15 14:58         ` Konstantin Ananyev
  2020-07-15 14:58         ` [dpdk-dev] [PATCH v5 2/2] mempool/ring: add support for new ring sync modes Konstantin Ananyev
  1 sibling, 0 replies; 25+ messages in thread
From: Konstantin Ananyev @ 2020-07-15 14:58 UTC (permalink / raw)
  To: dev
  Cc: olivier.matz, arybchenko, jielong.zjl, gage.eads, thomas,
	Konstantin Ananyev

Add documentation for rte_ring mempool driver.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 doc/guides/mempool/index.rst |  1 +
 doc/guides/mempool/ring.rst  | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)
 create mode 100644 doc/guides/mempool/ring.rst

diff --git a/doc/guides/mempool/index.rst b/doc/guides/mempool/index.rst
index 756610264..bbd02fd81 100644
--- a/doc/guides/mempool/index.rst
+++ b/doc/guides/mempool/index.rst
@@ -13,3 +13,4 @@ application through the mempool API.
 
     octeontx
     octeontx2
+    ring
diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst
new file mode 100644
index 000000000..b8659c03f
--- /dev/null
+++ b/doc/guides/mempool/ring.rst
@@ -0,0 +1,34 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2020 Intel Corporation.
+
+Ring Mempool Driver
+==============================
+
+**rte_mempool_ring** is a pure SW mempool driver based on ``rte_ring``
+DPDK library. This is a default mempool driver.
+Following modes of operation are available for ``ring`` mempool driver
+and can be selected via mempool ops API:
+
+- ``ring_mp_mc``
+
+  Underlying **rte_ring** operates in multi-thread producer,
+  multi-thread consumer sync mode.
+
+- ``ring_sp_sc``
+
+  Underlying **rte_ring** operates in single-thread producer,
+  single-thread consumer sync mode.
+
+- ``ring_sp_mc``
+
+  Underlying **rte_ring** operates in single-thread producer,
+  multi-thread consumer sync mode.
+
+- ``ring_mp_sc``
+
+  Underlying **rte_ring** operates in multi-thread producer,
+  single-thread consumer sync mode.
+
+
+For more information about ``rte_ring`` structure, behaviour and available
+synchronisation modes please refer to: :doc:`../prog_guide/ring_lib`.
-- 
2.17.1


^ permalink raw reply	[flat|nested] 25+ messages in thread

* [dpdk-dev] [PATCH v5 2/2] mempool/ring: add support for new ring sync modes
  2020-07-15 14:58       ` [dpdk-dev] [PATCH v5 0/2] " Konstantin Ananyev
  2020-07-15 14:58         ` [dpdk-dev] [PATCH v5 1/2] doc: add ring based mempool guide Konstantin Ananyev
@ 2020-07-15 14:58         ` Konstantin Ananyev
  2020-07-21 17:25           ` David Marchand
  1 sibling, 1 reply; 25+ messages in thread
From: Konstantin Ananyev @ 2020-07-15 14:58 UTC (permalink / raw)
  To: dev
  Cc: olivier.matz, arybchenko, jielong.zjl, gage.eads, thomas,
	Konstantin Ananyev

Two new sync modes were introduced into rte_ring:
relaxed tail sync (RTS) and head/tail sync (HTS).
This change provides user with ability to select these
modes for ring based mempool via mempool ops API.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Gage Eads <gage.eads@intel.com>
---
 doc/guides/mempool/ring.rst             | 22 ++++++-
 doc/guides/prog_guide/ring_lib.rst      |  8 +++
 doc/guides/rel_notes/release_20_08.rst  |  6 ++
 drivers/mempool/ring/rte_mempool_ring.c | 88 ++++++++++++++++++++++---
 4 files changed, 113 insertions(+), 11 deletions(-)

diff --git a/doc/guides/mempool/ring.rst b/doc/guides/mempool/ring.rst
index b8659c03f..ca03180ea 100644
--- a/doc/guides/mempool/ring.rst
+++ b/doc/guides/mempool/ring.rst
@@ -12,12 +12,14 @@ and can be selected via mempool ops API:
 - ``ring_mp_mc``
 
   Underlying **rte_ring** operates in multi-thread producer,
-  multi-thread consumer sync mode.
+  multi-thread consumer sync mode. For more information please refer to:
+  :ref:`Ring_Library_MPMC_Mode`.
 
 - ``ring_sp_sc``
 
   Underlying **rte_ring** operates in single-thread producer,
-  single-thread consumer sync mode.
+  single-thread consumer sync mode. For more information please refer to:
+  :ref:`Ring_Library_SPSC_Mode`.
 
 - ``ring_sp_mc``
 
@@ -29,6 +31,22 @@ and can be selected via mempool ops API:
   Underlying **rte_ring** operates in multi-thread producer,
   single-thread consumer sync mode.
 
+- ``ring_mt_rts``
+
+  For underlying **rte_ring** both producer and consumer operate in
+  multi-thread Relaxed Tail Sync (RTS) mode. For more information please
+  refer to: :ref:`Ring_Library_MT_RTS_Mode`.
+
+- ``ring_mt_hts``
+
+  For underlying **rte_ring** both producer and consumer operate in
+  multi-thread Head-Tail Sync (HTS) mode. For more information please
+  refer to: :ref:`Ring_Library_MT_HTS_Mode`.
+
 
+For 'classic' DPDK deployments (with one thread per core) ``ring_mp_mc``
+mode is usually the most suitable and the fastest one. For overcommitted
+scenarios (multiple threads share same set of cores) ``ring_mt_rts`` or
+``ring_mt_hts`` usually provide a better alternative.
 For more information about ``rte_ring`` structure, behaviour and available
 synchronisation modes please refer to: :doc:`../prog_guide/ring_lib`.
diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index f0a5a78b0..895484d95 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -359,6 +359,8 @@ That should help users to configure ring in the most suitable way for his
 specific usage scenarios.
 Currently supported modes:
 
+.. _Ring_Library_MPMC_Mode:
+
 MP/MC (default one)
 ~~~~~~~~~~~~~~~~~~~
 
@@ -369,11 +371,15 @@ per core) this is usually the most suitable and fastest synchronization mode.
 As a well known limitation - it can perform quite pure on some overcommitted
 scenarios.
 
+.. _Ring_Library_SPSC_Mode:
+
 SP/SC
 ~~~~~
 Single-producer (/single-consumer) mode. In this mode only one thread at a time
 is allowed to enqueue (/dequeue) objects to (/from) the ring.
 
+.. _Ring_Library_MT_RTS_Mode:
+
 MP_RTS/MC_RTS
 ~~~~~~~~~~~~~
 
@@ -390,6 +396,8 @@ one for head update, second for tail update.
 In comparison the original MP/MC algorithm requires one 32-bit CAS
 for head update and waiting/spinning on tail value.
 
+.. _Ring_Library_MT_HTS_Mode:
+
 MP_HTS/MC_HTS
 ~~~~~~~~~~~~~
 
diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index 17d70e7c1..db25c6f9c 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -69,6 +69,12 @@ New Features
   barriers. rte_*mb APIs, for ARMv8 platforms, are changed to use DMB
   instruction to reflect this.
 
+* **Added support for new sync modes into mempool ring driver.**
+
+  Added ability to select new ring synchronisation modes:
+  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
+  via mempool ops API.
+
 * **Added the support for vfio-pci new VF token interface.**
 
   From Linux 5.7, vfio-pci supports to bind both SR-IOV PF and the created VFs,
diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
index bc123fc52..b1f09ff28 100644
--- a/drivers/mempool/ring/rte_mempool_ring.c
+++ b/drivers/mempool/ring/rte_mempool_ring.c
@@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+	unsigned int n)
+{
+	return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static int
 common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
@@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
 			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
 }
 
+static int
+rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
+static int
+hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+	return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
+			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
+}
+
 static unsigned
 common_ring_get_count(const struct rte_mempool *mp)
 {
 	return rte_ring_count(mp->pool_data);
 }
 
-
 static int
-common_ring_alloc(struct rte_mempool *mp)
+ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
 {
-	int rg_flags = 0, ret;
+	int ret;
 	char rg_name[RTE_RING_NAMESIZE];
 	struct rte_ring *r;
 
@@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
 		return -rte_errno;
 	}
 
-	/* ring flags */
-	if (mp->flags & MEMPOOL_F_SP_PUT)
-		rg_flags |= RING_F_SP_ENQ;
-	if (mp->flags & MEMPOOL_F_SC_GET)
-		rg_flags |= RING_F_SC_DEQ;
-
 	/*
 	 * Allocate the ring that will be used to store objects.
 	 * Ring functions will return appropriate errors if we are
@@ -82,6 +105,31 @@ common_ring_alloc(struct rte_mempool *mp)
 	return 0;
 }
 
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+	uint32_t rg_flags = 0;
+
+	if (mp->flags & MEMPOOL_F_SP_PUT)
+		rg_flags |= RING_F_SP_ENQ;
+	if (mp->flags & MEMPOOL_F_SC_GET)
+		rg_flags |= RING_F_SC_DEQ;
+
+	return ring_alloc(mp, rg_flags);
+}
+
+static int
+rts_ring_alloc(struct rte_mempool *mp)
+{
+	return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
+}
+
+static int
+hts_ring_alloc(struct rte_mempool *mp)
+{
+	return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
+}
+
 static void
 common_ring_free(struct rte_mempool *mp)
 {
@@ -130,7 +178,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
 	.get_count = common_ring_get_count,
 };
 
+/* ops for mempool with ring in MT_RTS sync mode */
+static const struct rte_mempool_ops ops_mt_rts = {
+	.name = "ring_mt_rts",
+	.alloc = rts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = rts_ring_mp_enqueue,
+	.dequeue = rts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
+/* ops for mempool with ring in MT_HTS sync mode */
+static const struct rte_mempool_ops ops_mt_hts = {
+	.name = "ring_mt_hts",
+	.alloc = hts_ring_alloc,
+	.free = common_ring_free,
+	.enqueue = hts_ring_mp_enqueue,
+	.dequeue = hts_ring_mc_dequeue,
+	.get_count = common_ring_get_count,
+};
+
 MEMPOOL_REGISTER_OPS(ops_mp_mc);
 MEMPOOL_REGISTER_OPS(ops_sp_sc);
 MEMPOOL_REGISTER_OPS(ops_mp_sc);
 MEMPOOL_REGISTER_OPS(ops_sp_mc);
+MEMPOOL_REGISTER_OPS(ops_mt_rts);
+MEMPOOL_REGISTER_OPS(ops_mt_hts);
-- 
2.17.1


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [dpdk-dev] [PATCH v5 2/2] mempool/ring: add support for new ring sync modes
  2020-07-15 14:58         ` [dpdk-dev] [PATCH v5 2/2] mempool/ring: add support for new ring sync modes Konstantin Ananyev
@ 2020-07-21 17:25           ` David Marchand
  0 siblings, 0 replies; 25+ messages in thread
From: David Marchand @ 2020-07-21 17:25 UTC (permalink / raw)
  To: Konstantin Ananyev
  Cc: dev, Olivier Matz, Andrew Rybchenko, jielong.zjl, Gage Eads,
	Thomas Monjalon

On Wed, Jul 15, 2020 at 4:59 PM Konstantin Ananyev
<konstantin.ananyev@intel.com> wrote:
>
> Two new sync modes were introduced into rte_ring:
> relaxed tail sync (RTS) and head/tail sync (HTS).
> This change provides user with ability to select these
> modes for ring based mempool via mempool ops API.
>
> Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> Acked-by: Gage Eads <gage.eads@intel.com>

No change in behavior for existing users so I went and took it for -rc2.
Series applied, thanks.

-- 
David Marchand


^ permalink raw reply	[flat|nested] 25+ messages in thread

end of thread, other threads:[~2020-07-21 17:26 UTC | newest]

Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-21 13:20 [dpdk-dev] [PATCH 20.08] mempool/ring: add support for new ring sync modes Konstantin Ananyev
2020-06-29 16:10 ` [dpdk-dev] [PATCH v2] " Konstantin Ananyev
2020-07-09 16:18   ` Olivier Matz
2020-07-09 17:55     ` Ananyev, Konstantin
2020-07-10 12:52       ` Olivier Matz
2020-07-10 15:15         ` Ananyev, Konstantin
2020-07-10 15:20           ` Ananyev, Konstantin
2020-07-13 13:30             ` Olivier Matz
2020-07-13 14:46               ` Ananyev, Konstantin
2020-07-13 15:00                 ` Olivier Matz
2020-07-13 16:29                   ` Ananyev, Konstantin
2020-07-10 16:21   ` [dpdk-dev] [PATCH v3] " Konstantin Ananyev
2020-07-10 22:44     ` Thomas Monjalon
2020-07-13 12:58       ` Ananyev, Konstantin
2020-07-13 13:57         ` Thomas Monjalon
2020-07-13 15:50     ` [dpdk-dev] [PATCH v4 0/2] " Konstantin Ananyev
2020-07-13 15:50       ` [dpdk-dev] [PATCH v4 1/2] doc: add ring based mempool guide Konstantin Ananyev
2020-07-13 15:50       ` [dpdk-dev] [PATCH v4 2/2] mempool/ring: add support for new ring sync modes Konstantin Ananyev
2020-07-13 17:37         ` Thomas Monjalon
2020-07-14  9:16           ` Ananyev, Konstantin
2020-07-15  9:59             ` Thomas Monjalon
2020-07-15 14:58       ` [dpdk-dev] [PATCH v5 0/2] " Konstantin Ananyev
2020-07-15 14:58         ` [dpdk-dev] [PATCH v5 1/2] doc: add ring based mempool guide Konstantin Ananyev
2020-07-15 14:58         ` [dpdk-dev] [PATCH v5 2/2] mempool/ring: add support for new ring sync modes Konstantin Ananyev
2020-07-21 17:25           ` David Marchand

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).