DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
@ 2017-12-06 12:31 Hemant Agrawal
  2017-12-06 12:31 ` [dpdk-dev] [PATCH 2/2] mempool/dpaa: optimize phy to virt conversion Hemant Agrawal
                   ` (2 more replies)
  0 siblings, 3 replies; 14+ messages in thread
From: Hemant Agrawal @ 2017-12-06 12:31 UTC (permalink / raw)
  To: olivier.matz; +Cc: dev

This is required for optimizations w.r.t. hw mempools.
They will use different kinds of optimizations if the buffers
are from a single contiguous memzone.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 lib/librte_mempool/rte_mempool.c | 7 +++++--
 lib/librte_mempool/rte_mempool.h | 5 +++++
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index d50dba4..9d3737c 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
 	/* Detect pool area has sufficient space for elements */
-	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
-		if (len < total_elt_sz * mp->size) {
+	if (len < total_elt_sz * mp->size) {
+		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
 			RTE_LOG(ERR, MEMPOOL,
 				"pool area %" PRIx64 " not enough\n",
 				(uint64_t)len);
 			return -ENOSPC;
 		}
+
+		/* Memory will be allocated from multiple memzones */
+		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;
 	}
 
 	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 721227f..394a4fe 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -292,6 +292,11 @@ struct rte_mempool {
  */
 #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
 
+/* Indicates that the mempool buffers are allocated from multiple memzones;
+ * the buffers may or may not be physically contiguous.
+ */
+#define MEMPOOL_F_MULTI_MEMZONE 0x0100
+
 /**
  * @internal When debug is enabled, store some statistics.
  *
-- 
2.7.4
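
For illustration, a minimal sketch of how an application could observe the
new flag once this patch is applied; the pool name and sizes are
illustrative, not part of the patch:

	#include <stdio.h>
	#include <rte_lcore.h>
	#include <rte_mbuf.h>
	#include <rte_mempool.h>

	static void
	check_pool_layout(void)
	{
		/* A pool this large is unlikely to fit into one contiguous
		 * memzone, so rte_mempool_populate_iova() would set the
		 * flag while populating it chunk by chunk.
		 */
		struct rte_mempool *mp = rte_pktmbuf_pool_create("big_pool",
				1 << 20, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
				rte_socket_id());

		if (mp != NULL && (mp->flags & MEMPOOL_F_MULTI_MEMZONE))
			printf("pool is populated from multiple memzones\n");
	}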


* [dpdk-dev] [PATCH 2/2] mempool/dpaa: optimize phy to virt conversion
  2017-12-06 12:31 [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones Hemant Agrawal
@ 2017-12-06 12:31 ` Hemant Agrawal
  2017-12-19 10:24 ` [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones Olivier MATZ
  2018-01-17  8:51 ` [dpdk-dev] [PATCH v2] mempool/dpaa: optimize phy to virt conversion Hemant Agrawal
  2 siblings, 0 replies; 14+ messages in thread
From: Hemant Agrawal @ 2017-12-06 12:31 UTC (permalink / raw)
  To: olivier.matz; +Cc: dev

If the allocation is from a single memzone, optimize
the phy-virt address conversions.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/mempool/dpaa/dpaa_mempool.c | 18 +++++++++++++++---
 drivers/mempool/dpaa/dpaa_mempool.h |  9 +++++++++
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index f5ee80f..ac3e4ac 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -99,6 +99,7 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
 	rte_dpaa_bpid_info[bpid].meta_data_size =
 		sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
 	rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
+	rte_dpaa_bpid_info[bpid].ptov_off = 0;
 
 	bp_info = rte_malloc(NULL,
 			     sizeof(struct dpaa_bp_info),
@@ -171,9 +172,20 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
 	}
 
 	while (i < n) {
+		uint64_t phy = rte_mempool_virt2iova(obj_table[i]);
+
+		if (unlikely(!bp_info->ptov_off)) {
+			/* buffers are not from multiple memzones */
+			if (!(bp_info->mp->flags & MEMPOOL_F_MULTI_MEMZONE)) {
+				bp_info->ptov_off
+						= (uint64_t)obj_table[i] - phy;
+				rte_dpaa_bpid_info[bp_info->bpid].ptov_off
+						= bp_info->ptov_off;
+			}
+		}
+
 		dpaa_buf_free(bp_info,
-			      (uint64_t)rte_mempool_virt2iova(obj_table[i]) +
-			      bp_info->meta_data_size);
+			      (uint64_t)phy + bp_info->meta_data_size);
 		i = i + 1;
 	}
 
@@ -241,7 +253,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
 			 * i.e. first buffer is valid, remaining 6 buffers
 			 * may be null.
 			 */
-			bufaddr = (void *)rte_dpaa_mem_ptov(bufs[i].addr);
+			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
 			m[n] = (struct rte_mbuf *)((char *)bufaddr
 						- bp_info->meta_data_size);
 			DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 6795859..8160af6 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -61,8 +61,17 @@ struct dpaa_bp_info {
 	uint32_t size;
 	uint32_t meta_data_size;
 	int32_t dpaa_ops_index;
+	int64_t ptov_off;
 };
 
+static inline void *
+DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
+{
+	if (bp_info->ptov_off)
+		return ((void *)(addr + bp_info->ptov_off));
+	return rte_dpaa_mem_ptov(addr);
+}
+
 #define DPAA_MEMPOOL_TO_POOL_INFO(__mp) \
 	((struct dpaa_bp_info *)__mp->pool_data)
 
-- 
2.7.4
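
The gist of the optimization, as a sketch (obj and hw_paddr below are
placeholders for a pool element and a hardware-returned physical address):
within a single contiguous memzone, virt - phys is one constant, so it can
be cached on the first conversion and every later phys-to-virt translation
becomes a single addition:

	/* First free to the hw pool: cache the zone's constant delta. */
	uint64_t phy = rte_mempool_virt2iova(obj);
	int64_t ptov_off = (uint64_t)obj - phy;  /* virt - phys, constant */

	/* Later, a physical address handed back by the hardware converts
	 * without walking the memseg table:
	 */
	void *virt = (void *)(hw_paddr + ptov_off);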


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-06 12:31 [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones Hemant Agrawal
  2017-12-06 12:31 ` [dpdk-dev] [PATCH 2/2] mempool/dpaa: optimize phy to virt conversion Hemant Agrawal
@ 2017-12-19 10:24 ` Olivier MATZ
  2017-12-19 10:46   ` Hemant Agrawal
  2018-01-17  8:51 ` [dpdk-dev] [PATCH v2] mempool/dpaa: optimize phy to virt conversion Hemant Agrawal
  2 siblings, 1 reply; 14+ messages in thread
From: Olivier MATZ @ 2017-12-19 10:24 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev

Hi Hemant,

On Wed, Dec 06, 2017 at 06:01:12PM +0530, Hemant Agrawal wrote:
> This is required for optimizations w.r.t. hw mempools.
> They will use different kinds of optimizations if the buffers
> are from a single contiguous memzone.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  lib/librte_mempool/rte_mempool.c | 7 +++++--
>  lib/librte_mempool/rte_mempool.h | 5 +++++
>  2 files changed, 10 insertions(+), 2 deletions(-)
> 
> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> index d50dba4..9d3737c 100644
> --- a/lib/librte_mempool/rte_mempool.c
> +++ b/lib/librte_mempool/rte_mempool.c
> @@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
>  	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
>  
>  	/* Detect pool area has sufficient space for elements */
> -	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
> -		if (len < total_elt_sz * mp->size) {
> +	if (len < total_elt_sz * mp->size) {
> +		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
>  			RTE_LOG(ERR, MEMPOOL,
>  				"pool area %" PRIx64 " not enough\n",
>  				(uint64_t)len);
>  			return -ENOSPC;
>  		}
> +
> +		/* Memory will be allocated from multiple memzones */
> +		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;
>  	}
>  
>  	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> index 721227f..394a4fe 100644
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -292,6 +292,11 @@ struct rte_mempool {
>   */
>  #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
>  
> +/* Indicates that the mempool buffers are allocated from multiple memzones;
> + * the buffers may or may not be physically contiguous.
> + */
> +#define MEMPOOL_F_MULTI_MEMZONE 0x0100
> +
>  /**
>   * @internal When debug is enabled, store some statistics.
>   *
> -- 
> 2.7.4
> 

I'm not comfortable with adding more and more flags, as I explained
here: http://dpdk.org/ml/archives/dev/2017-December/083909.html

It makes the generic code very complex, and probably buggy (many
flags are incompatible with other flags).

I'm thinking about moving the populate_* functions into the drivers
(this is described a bit more in the link above). What do you think
about this approach?

Thanks,
Olivier


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-19 10:24 ` [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones Olivier MATZ
@ 2017-12-19 10:46   ` Hemant Agrawal
  2017-12-19 11:02     ` Olivier MATZ
  0 siblings, 1 reply; 14+ messages in thread
From: Hemant Agrawal @ 2017-12-19 10:46 UTC (permalink / raw)
  To: Olivier MATZ; +Cc: dev

Hi Olivier,

On 12/19/2017 3:54 PM, Olivier MATZ wrote:
> Hi Hemant,
>
> On Wed, Dec 06, 2017 at 06:01:12PM +0530, Hemant Agrawal wrote:
>> This is required for optimizations w.r.t. hw mempools.
>> They will use different kinds of optimizations if the buffers
>> are from a single contiguous memzone.
>>
>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>> ---
>>  lib/librte_mempool/rte_mempool.c | 7 +++++--
>>  lib/librte_mempool/rte_mempool.h | 5 +++++
>>  2 files changed, 10 insertions(+), 2 deletions(-)
>>
>> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
>> index d50dba4..9d3737c 100644
>> --- a/lib/librte_mempool/rte_mempool.c
>> +++ b/lib/librte_mempool/rte_mempool.c
>> @@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
>>  	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
>>
>>  	/* Detect pool area has sufficient space for elements */
>> -	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
>> -		if (len < total_elt_sz * mp->size) {
>> +	if (len < total_elt_sz * mp->size) {
>> +		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
>>  			RTE_LOG(ERR, MEMPOOL,
>>  				"pool area %" PRIx64 " not enough\n",
>>  				(uint64_t)len);
>>  			return -ENOSPC;
>>  		}
>> +
>> +		/* Memory will be allocated from multiple memzones */
>> +		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;
>>  	}
>>
>>  	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
>> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
>> index 721227f..394a4fe 100644
>> --- a/lib/librte_mempool/rte_mempool.h
>> +++ b/lib/librte_mempool/rte_mempool.h
>> @@ -292,6 +292,11 @@ struct rte_mempool {
>>   */
>>  #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
>>
>> +/* Indicates that the mempool buffers are allocated from multiple memzones;
>> + * the buffers may or may not be physically contiguous.
>> + */
>> +#define MEMPOOL_F_MULTI_MEMZONE 0x0100
>> +
>>  /**
>>   * @internal When debug is enabled, store some statistics.
>>   *
>> --
>> 2.7.4
>>
>
> I'm not comfortable with adding more and more flags, as I explained
> here: http://dpdk.org/ml/archives/dev/2017-December/083909.html

This particular flag is not about how to populate the mempool. It is just 
indicating how the mempool was populated - a status flag. This 
information is just helpful for the PMDs.

At least I am not able to see that this particular flag is very 
driver specific.


>
> It makes the generic code very complex, and probably buggy (many
> flags are incompatible with other flags).
>
> I'm thinking about moving the populate_* functions into the drivers
> (this is described a bit more in the link above). What do you think
> about this approach?
>

The idea is good and it will give fine control to the individual 
mempools to populate the memory the way they want. However, on the 
downside, it will also lead to a lot of duplicate or similar code. It 
may also lead to a maintenance issue for the mempool PMD owner.

> Thanks,
> Olivier
>


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-19 10:46   ` Hemant Agrawal
@ 2017-12-19 11:02     ` Olivier MATZ
  2017-12-19 13:08       ` Hemant Agrawal
  2018-01-05 10:52       ` santosh
  0 siblings, 2 replies; 14+ messages in thread
From: Olivier MATZ @ 2017-12-19 11:02 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev

On Tue, Dec 19, 2017 at 04:16:33PM +0530, Hemant Agrawal wrote:
> Hi Olivier,
> 
> On 12/19/2017 3:54 PM, Olivier MATZ wrote:
> > Hi Hemant,
> > 
> > On Wed, Dec 06, 2017 at 06:01:12PM +0530, Hemant Agrawal wrote:
> > > This is required for optimizations w.r.t. hw mempools.
> > > They will use different kinds of optimizations if the buffers
> > > are from a single contiguous memzone.
> > > 
> > > Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> > > ---
> > >  lib/librte_mempool/rte_mempool.c | 7 +++++--
> > >  lib/librte_mempool/rte_mempool.h | 5 +++++
> > >  2 files changed, 10 insertions(+), 2 deletions(-)
> > > 
> > > diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
> > > index d50dba4..9d3737c 100644
> > > --- a/lib/librte_mempool/rte_mempool.c
> > > +++ b/lib/librte_mempool/rte_mempool.c
> > > @@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
> > >  	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
> > > 
> > >  	/* Detect pool area has sufficient space for elements */
> > > -	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
> > > -		if (len < total_elt_sz * mp->size) {
> > > +	if (len < total_elt_sz * mp->size) {
> > > +		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
> > >  			RTE_LOG(ERR, MEMPOOL,
> > >  				"pool area %" PRIx64 " not enough\n",
> > >  				(uint64_t)len);
> > >  			return -ENOSPC;
> > >  		}
> > > +
> > > +		/* Memory will be allocated from multiple memzones */
> > > +		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;
> > >  	}
> > > 
> > >  	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
> > > diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> > > index 721227f..394a4fe 100644
> > > --- a/lib/librte_mempool/rte_mempool.h
> > > +++ b/lib/librte_mempool/rte_mempool.h
> > > @@ -292,6 +292,11 @@ struct rte_mempool {
> > >   */
> > >  #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
> > > 
> > > +/* Indicates that the mempool buffers are allocated from multiple memzones;
> > > + * the buffers may or may not be physically contiguous.
> > > + */
> > > +#define MEMPOOL_F_MULTI_MEMZONE 0x0100
> > > +
> > >  /**
> > >   * @internal When debug is enabled, store some statistics.
> > >   *
> > > --
> > > 2.7.4
> > > 
> > 
> > I'm not comfortable with adding more and more flags, as I explained
> > here: http://dpdk.org/ml/archives/dev/2017-December/083909.html
> 
> This particular flag is not about how to populate the mempool. It is just
> indicating how the mempool was populated - a status flag. This information
> is just helpful for the PMDs.
> 
> At least I am not able to see that this particular flag is very driver
> specific.

That's true, I commented too fast :)
And what about using mp->nb_mem_chunks instead? Would it do the job
in your use-case?


> > It makes the generic code very complex, and probably buggy (many
> > flags are incompatible with other flags).
> > 
> > I'm thinking about moving the populate_* functions into the drivers
> > (this is described a bit more in the link above). What do you think
> > about this approach?
> > 
> 
> The idea is good and it will give fine control to the individual mempools to
> populate the memory the way they want. However, on the downside, it will
> also lead to a lot of duplicate or similar code. It may also lead to a
> maintenance issue for the mempool PMD owner.

Yes, that will be the drawback. If we do this, we should try to keep some
common helpers in the mempool lib.
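
On the driver side, the suggested alternative would amount to a check like
the sketch below (illustrative only; as the follow-ups show, nb_mem_chunks
is not yet stable at the moment the driver first needs it):

	/* Hypothetical replacement for a new status flag: a pool
	 * populated from exactly one chunk is single-memzone, so the
	 * constant virt - phys offset can be cached.
	 */
	if (mp->nb_mem_chunks == 1)
		bp_info->ptov_off =
			(uint64_t)obj - rte_mempool_virt2iova(obj);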


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-19 11:02     ` Olivier MATZ
@ 2017-12-19 13:08       ` Hemant Agrawal
  2017-12-20 11:59         ` Hemant Agrawal
  2018-01-05 10:52       ` santosh
  1 sibling, 1 reply; 14+ messages in thread
From: Hemant Agrawal @ 2017-12-19 13:08 UTC (permalink / raw)
  To: Olivier MATZ; +Cc: dev


> That's true, I commented too fast :)
> And what about using mp->nb_mem_chunks instead? Would it do the job
> in your use-case?

It should work.  Let me check it out.

Thanks
Regards,
Hemant


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-19 13:08       ` Hemant Agrawal
@ 2017-12-20 11:59         ` Hemant Agrawal
  2017-12-22 13:59           ` Olivier MATZ
  0 siblings, 1 reply; 14+ messages in thread
From: Hemant Agrawal @ 2017-12-20 11:59 UTC (permalink / raw)
  To: Olivier MATZ; +Cc: dev

On 12/19/2017 6:38 PM, Hemant Agrawal wrote:
>
>> That's true, I commented too fast :)
>> And what about using mp->nb_mem_chunks instead? Would it do the job
>> in your use-case?
>
> It should work.  Let me check it out.

There is a slight problem with nb_mem_chunks.

It is incremented at the end of "rte_mempool_populate_phys",
while the elements are populated before that, in the call to
mempool_add_elem.

I can use an nb_mem_chunks == 0 check. However, it can break in the future
if mempool_populate_phys changes.


>
> Thanks
> Regards,
> Hemant
>
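
The ordering problem, as a simplified reconstruction of
rte_mempool_populate_phys() (not the verbatim library source): the driver's
enqueue callback runs once per element before the chunk counter is updated:

	/* rte_mempool_populate_phys(), simplified: */
	for (off = 0; off + total_elt_sz <= len; off += total_elt_sz)
		/* enqueues the element to the driver, which therefore
		 * runs while nb_mem_chunks still holds its old value */
		mempool_add_elem(mp, (char *)vaddr + off, paddr + off);

	STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
	mp->nb_mem_chunks++;	/* incremented only after population */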


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-20 11:59         ` Hemant Agrawal
@ 2017-12-22 13:59           ` Olivier MATZ
  2017-12-22 16:18             ` Hemant Agrawal
  0 siblings, 1 reply; 14+ messages in thread
From: Olivier MATZ @ 2017-12-22 13:59 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev

On Wed, Dec 20, 2017 at 05:29:59PM +0530, Hemant Agrawal wrote:
> On 12/19/2017 6:38 PM, Hemant Agrawal wrote:
> > 
> > > That's true, I commented too fast :)
> > > And what about using mp->nb_mem_chunks instead? Would it do the job
> > > in your use-case?
> > 
> > It should work.  Let me check it out.
> 
> There is a slight problem with nb_mem_chunks.
> 
> It is incremented at the end of "rte_mempool_populate_phys",
> while the elements are populated before that, in the call to
> mempool_add_elem.
> 
> I can use an nb_mem_chunks == 0 check. However, it can break in the future
> if mempool_populate_phys changes.

Sorry, I'm not sure I'm getting what you're saying.

My question was about using mp->nb_mem_chunks instead of a new flag in the
dpaa driver. Am I missing something?


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-22 13:59           ` Olivier MATZ
@ 2017-12-22 16:18             ` Hemant Agrawal
  2018-01-16 13:51               ` Olivier Matz
  0 siblings, 1 reply; 14+ messages in thread
From: Hemant Agrawal @ 2017-12-22 16:18 UTC (permalink / raw)
  To: Olivier MATZ; +Cc: dev

On 12/22/2017 7:29 PM, Olivier MATZ wrote:
> On Wed, Dec 20, 2017 at 05:29:59PM +0530, Hemant Agrawal wrote:
>> On 12/19/2017 6:38 PM, Hemant Agrawal wrote:
>>>
>>>> That's true, I commented too fast :)
>>>> And what about using mp->nb_mem_chunks instead? Would it do the job
>>>> in your use-case?
>>>
>>> It should work.  Let me check it out.
>>
>> There is a slight problem with nb_mem_chunks.
>>
>> It is incremented at the end of "rte_mempool_populate_phys",
>> while the elements are populated before that, in the call to
>> mempool_add_elem.
>>
>> I can use an nb_mem_chunks == 0 check. However, it can break in the future
>> if mempool_populate_phys changes.
>
> Sorry, I'm not sure I'm getting what you're saying.
>
> My question was about using mp->nb_mem_chunks instead of a new flag in the
> dpaa driver. Am I missing something?
>

mp->nb_mem_chunks gets finalized only when the mempool is fully created. Its
value is transient before that, i.e. it keeps changing on every call to
rte_mempool_populate_phys.

However, we need this information at the very first element allocation.
So, nb_mem_chunks will not work.


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-19 11:02     ` Olivier MATZ
  2017-12-19 13:08       ` Hemant Agrawal
@ 2018-01-05 10:52       ` santosh
  1 sibling, 0 replies; 14+ messages in thread
From: santosh @ 2018-01-05 10:52 UTC (permalink / raw)
  To: Olivier MATZ, Hemant Agrawal; +Cc: dev


On Tuesday 19 December 2017 04:32 PM, Olivier MATZ wrote:
> On Tue, Dec 19, 2017 at 04:16:33PM +0530, Hemant Agrawal wrote:
>> Hi Olivier,
>>
>> On 12/19/2017 3:54 PM, Olivier MATZ wrote:
>>> Hi Hemant,
>>>
>>> On Wed, Dec 06, 2017 at 06:01:12PM +0530, Hemant Agrawal wrote:
>>>> This is required for optimizations w.r.t. hw mempools.
>>>> They will use different kinds of optimizations if the buffers
>>>> are from a single contiguous memzone.
>>>>
>>>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>>>> ---
>>>>  lib/librte_mempool/rte_mempool.c | 7 +++++--
>>>>  lib/librte_mempool/rte_mempool.h | 5 +++++
>>>>  2 files changed, 10 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
>>>> index d50dba4..9d3737c 100644
>>>> --- a/lib/librte_mempool/rte_mempool.c
>>>> +++ b/lib/librte_mempool/rte_mempool.c
>>>> @@ -387,13 +387,16 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
>>>>  	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
>>>>
>>>>  	/* Detect pool area has sufficient space for elements */
>>>> -	if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
>>>> -		if (len < total_elt_sz * mp->size) {
>>>> +	if (len < total_elt_sz * mp->size) {
>>>> +		if (mp->flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
>>>>  			RTE_LOG(ERR, MEMPOOL,
>>>>  				"pool area %" PRIx64 " not enough\n",
>>>>  				(uint64_t)len);
>>>>  			return -ENOSPC;
>>>>  		}
>>>> +
>>>> +		/* Memory will be allocated from multiple memzones */
>>>> +		mp->flags |= MEMPOOL_F_MULTI_MEMZONE;
>>>>  	}
>>>>
>>>>  	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
>>>> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
>>>> index 721227f..394a4fe 100644
>>>> --- a/lib/librte_mempool/rte_mempool.h
>>>> +++ b/lib/librte_mempool/rte_mempool.h
>>>> @@ -292,6 +292,11 @@ struct rte_mempool {
>>>>   */
>>>>  #define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
>>>>
>>>> +/* Indicates that the mempool buffers are allocated from multiple memzones;
>>>> + * the buffers may or may not be physically contiguous.
>>>> + */
>>>> +#define MEMPOOL_F_MULTI_MEMZONE 0x0100
>>>> +
>>>>  /**
>>>>   * @internal When debug is enabled, store some statistics.
>>>>   *
>>>> --
>>>> 2.7.4
>>>>
>>> I'm not comfortable with adding more and more flags, as I explained
>>> here: http://dpdk.org/ml/archives/dev/2017-December/083909.html
>> This particular flag is not about how to populate the mempool. It is just
>> indicating how the mempool was populated - a status flag. This information
>> is just helpful for the PMDs.
>>
>> At least I am not able to see that this particular flag is very driver
>> specific.
> That's true, I commented too fast :)
> And what about using mp->nb_mem_chunks instead? Would it do the job
> in your use-case?
>
>
>>> It makes the generic code very complex, and probably buggy (many
>>> flags are incompatible with other flags).
>>>
>>> I'm thinking about moving the populate_* functions into the drivers
>>> (this is described a bit more in the link above). What do you think
>>> about this approach?
>>>
>> The idea is good and it will give fine control to the individual mempools to
>> populate the memory the way they want. However, on the downside, it will
>> also lead to a lot of duplicate or similar code. It may also lead to a
>> maintenance issue for the mempool PMD owner.
> Yes, that will be the drawback. If we do this, we should try to keep some
> common helpers in the mempool lib.

Sorry for jumping in late on this and not responding to the other thread.

Olivier, in fact I tried the said approach for the ONA mempool driver but never proposed it ;)
for the reason pointed out by Hemant, i.e. more code duplication across mempool PMDs and
thus more maintenance burden.
However, I'm in favor of giving more control to the driver.


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2017-12-22 16:18             ` Hemant Agrawal
@ 2018-01-16 13:51               ` Olivier Matz
  2018-01-17  7:49                 ` Hemant Agrawal
  0 siblings, 1 reply; 14+ messages in thread
From: Olivier Matz @ 2018-01-16 13:51 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev

On Fri, Dec 22, 2017 at 09:48:01PM +0530, Hemant Agrawal wrote:
> On 12/22/2017 7:29 PM, Olivier MATZ wrote:
> > On Wed, Dec 20, 2017 at 05:29:59PM +0530, Hemant Agrawal wrote:
> > > On 12/19/2017 6:38 PM, Hemant Agrawal wrote:
> > > > 
> > > > > That's true, I commented too fast :)
> > > > > And what about using mp->nb_mem_chunks instead? Would it do the job
> > > > > in your use-case?
> > > > 
> > > > It should work.  Let me check it out.
> > > 
> > > There is a slight problem with nb_mem_chunks.
> > > 
> > > It is incremented at the end of "rte_mempool_populate_phys",
> > > while the elements are populated before that, in the call to
> > > mempool_add_elem.
> > > 
> > > I can use an nb_mem_chunks == 0 check. However, it can break in the future
> > > if mempool_populate_phys changes.
> > 
> > Sorry, I'm not sure I'm getting what you're saying.
> > 
> > My question was about using mp->nb_mem_chunks instead of a new flag in the
> > dpaa driver. Am I missing something?
> > 
> 
> mp->nb_mem_chunks gets finalized only when the mempool is fully created. Its
> value is transient before that, i.e. it keeps changing on every call to
> rte_mempool_populate_phys.
> 
> However, we need this information at the very first element allocation.
> So, nb_mem_chunks will not work.

I see 2 other alternatives:

1/ in your driver, register a callback rte_mempool_ops_register_memory_area()
   that sets a private flag if (len < total_elt_sz * mp->size).

2/ Move
    STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
    mp->nb_mem_chunks++;
   before the calls to mempool_add_elem(), and in your driver check if
   STAILQ_FIRST(&mp->mem_list)->len < total_elt_sz * mp->size

If we can avoid creating yet another flag, that is better.
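
Alternative 1 is what the v2 patch below ends up implementing. Alternative 2
would look roughly like this on the driver side (a sketch assuming the
insertion is moved as described; the recorded flag is driver-private and
hypothetical):

	/* With the chunk inserted before mempool_add_elem(), the driver
	 * can inspect the first memory chunk directly:
	 */
	struct rte_mempool_memhdr *hdr = STAILQ_FIRST(&mp->mem_list);
	size_t total_elt_sz = mp->header_size + mp->elt_size +
			      mp->trailer_size;

	if (hdr != NULL && hdr->len < total_elt_sz * mp->size)
		priv->multi_memzone = 1;	/* hypothetical private flag */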


* Re: [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones
  2018-01-16 13:51               ` Olivier Matz
@ 2018-01-17  7:49                 ` Hemant Agrawal
  0 siblings, 0 replies; 14+ messages in thread
From: Hemant Agrawal @ 2018-01-17  7:49 UTC (permalink / raw)
  To: Olivier Matz; +Cc: dev

Hi Olivier,

On 1/16/2018 7:21 PM, Olivier Matz wrote:
> On Fri, Dec 22, 2017 at 09:48:01PM +0530, Hemant Agrawal wrote:
>> On 12/22/2017 7:29 PM, Olivier MATZ wrote:
>>> On Wed, Dec 20, 2017 at 05:29:59PM +0530, Hemant Agrawal wrote:
>>>> On 12/19/2017 6:38 PM, Hemant Agrawal wrote:
>>>>>
>>>>>> That's true, I commented too fast :)
>>>>>> And what about using mp->nb_mem_chunks instead? Would it do the job
>>>>>> in your use-case?
>>>>>
>>>>> It should work.  Let me check it out.
>>>>
>>>> There is a slight problem with nb_mem_chunks.
>>>>
>>>> It is incremented at the end of "rte_mempool_populate_phys",
>>>> while the elements are populated before that, in the call to
>>>> mempool_add_elem.
>>>>
>>>> I can use an nb_mem_chunks == 0 check. However, it can break in the future
>>>> if mempool_populate_phys changes.
>>>
>>> Sorry, I'm not sure I'm getting what you're saying.
>>>
>>> My question was about using mp->nb_mem_chunks instead of a new flag in the
>>> dpaa driver. Am I missing something?
>>>
>>
>> mp->nb_mem_chunks gets finalized only when the mempool is fully created. Its
>> value is transient before that, i.e. it keeps changing on every call to
>> rte_mempool_populate_phys.
>>
>> However, we need this information at the very first element allocation.
>> So, nb_mem_chunks will not work.
>
> I see 2 other alternatives:
>
> 1/ in your driver, register a callback rte_mempool_ops_register_memory_area()
>    that sets a private flag if (len < total_elt_sz * mp->size).
>

Thanks!
This one works. Now, the changes will be confined to the dpaa code only.
I will send a v2 for that.

> 2/ Move
>     STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
>     mp->nb_mem_chunks++;
>    before the calls to mempool_add_elem(), and in your driver check if
>    STAILQ_FIRST(&mp->mem_list)->len < total_elt_sz * mp->size
>
> If we can avoid creating yet another flag, that is better.
>


* [dpdk-dev] [PATCH v2] mempool/dpaa: optimize phy to virt conversion
  2017-12-06 12:31 [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones Hemant Agrawal
  2017-12-06 12:31 ` [dpdk-dev] [PATCH 2/2] mempool/dpaa: optimize phy to virt conversion Hemant Agrawal
  2017-12-19 10:24 ` [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones Olivier MATZ
@ 2018-01-17  8:51 ` Hemant Agrawal
  2018-01-18 23:28   ` Thomas Monjalon
  2 siblings, 1 reply; 14+ messages in thread
From: Hemant Agrawal @ 2018-01-17  8:51 UTC (permalink / raw)
  To: olivier.matz; +Cc: dev

If the allocation is from a single memzone, optimize
the phy-virt address conversions.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
v2: use register memory area instead of new flag in mempool

 drivers/mempool/dpaa/dpaa_mempool.c | 50 ++++++++++++++++++++++++++++++++++---
 drivers/mempool/dpaa/dpaa_mempool.h | 13 ++++++++++
 2 files changed, 60 insertions(+), 3 deletions(-)

diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index ffb81c2..ddc4e47 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -73,6 +73,8 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
 	rte_dpaa_bpid_info[bpid].meta_data_size =
 		sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
 	rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
+	rte_dpaa_bpid_info[bpid].ptov_off = 0;
+	rte_dpaa_bpid_info[bpid].flags = 0;
 
 	bp_info = rte_malloc(NULL,
 			     sizeof(struct dpaa_bp_info),
@@ -145,9 +147,20 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
 	}
 
 	while (i < n) {
+		uint64_t phy = rte_mempool_virt2iova(obj_table[i]);
+
+		if (unlikely(!bp_info->ptov_off)) {
+			/* buffers are not from multiple memzones */
+			if (!(bp_info->flags & DPAA_MPOOL_MULTI_MEMZONE)) {
+				bp_info->ptov_off
+						= (uint64_t)obj_table[i] - phy;
+				rte_dpaa_bpid_info[bp_info->bpid].ptov_off
+						= bp_info->ptov_off;
+			}
+		}
+
 		dpaa_buf_free(bp_info,
-			      (uint64_t)rte_mempool_virt2iova(obj_table[i]) +
-			      bp_info->meta_data_size);
+			      (uint64_t)phy + bp_info->meta_data_size);
 		i = i + 1;
 	}
 
@@ -215,7 +228,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
 			 * i.e. first buffer is valid, remaining 6 buffers
 			 * may be null.
 			 */
-			bufaddr = (void *)rte_dpaa_mem_ptov(bufs[i].addr);
+			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
 			m[n] = (struct rte_mbuf *)((char *)bufaddr
 						- bp_info->meta_data_size);
 			DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
@@ -246,6 +259,36 @@ dpaa_mbuf_get_count(const struct rte_mempool *mp)
 	return bman_query_free_buffers(bp_info->bp);
 }
 
+static int
+dpaa_register_memory_area(const struct rte_mempool *mp,
+			  char *vaddr __rte_unused,
+			  rte_iova_t paddr __rte_unused,
+			  size_t len)
+{
+	struct dpaa_bp_info *bp_info;
+	unsigned int total_elt_sz;
+
+	MEMPOOL_INIT_FUNC_TRACE();
+
+	if (!mp || !mp->pool_data) {
+		DPAA_MEMPOOL_ERR("Invalid mempool provided\n");
+		return 0;
+	}
+
+	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+	DPAA_MEMPOOL_DEBUG("Req size %lu vs Available %u\n",
+			   len, total_elt_sz * mp->size);
+
+	/* Check whether this memzone has sufficient space for all elements */
+	if (len < total_elt_sz * mp->size)
+		/* if not, memory will be allocated from multiple memzones */
+		bp_info->flags |= DPAA_MPOOL_MULTI_MEMZONE;
+
+	return 0;
+}
+
 struct rte_mempool_ops dpaa_mpool_ops = {
 	.name = "dpaa",
 	.alloc = dpaa_mbuf_create_pool,
@@ -253,6 +296,7 @@ struct rte_mempool_ops dpaa_mpool_ops = {
 	.enqueue = dpaa_mbuf_free_bulk,
 	.dequeue = dpaa_mbuf_alloc_bulk,
 	.get_count = dpaa_mbuf_get_count,
+	.register_memory_area = dpaa_register_memory_area,
 };
 
 MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 91da62f..02aa513 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -28,6 +28,9 @@
 /* Maximum release/acquire from BMAN */
 #define DPAA_MBUF_MAX_ACQ_REL  8
 
+/* Buffers are allocated from multiple memzones i.e. non phys contiguous */
+#define DPAA_MPOOL_MULTI_MEMZONE  0x01
+
 struct dpaa_bp_info {
 	struct rte_mempool *mp;
 	struct bman_pool *bp;
@@ -35,8 +38,18 @@ struct dpaa_bp_info {
 	uint32_t size;
 	uint32_t meta_data_size;
 	int32_t dpaa_ops_index;
+	int64_t ptov_off;
+	uint8_t flags;
 };
 
+static inline void *
+DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
+{
+	if (bp_info->ptov_off)
+		return ((void *)(addr + bp_info->ptov_off));
+	return rte_dpaa_mem_ptov(addr);
+}
+
 #define DPAA_MEMPOOL_TO_POOL_INFO(__mp) \
 	((struct dpaa_bp_info *)__mp->pool_data)
 
-- 
2.7.4
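
For context, a simplified view of when the registered op runs during pool
population (reconstructed from the thread; the exact call chain may differ
across DPDK versions). The key point is that the driver learns the memory
layout before the first buffer is enqueued, so caching ptov_off on the
first free is safe:

	/* Simplified call flow:
	 *
	 * rte_mempool_populate_iova(mp, vaddr, iova, len, ...)
	 *   -> rte_mempool_ops_register_memory_area(mp, vaddr, iova, len)
	 *        -> dpaa_register_memory_area()
	 *           (may set DPAA_MPOOL_MULTI_MEMZONE)
	 *   -> mempool_add_elem() for each element
	 *        -> dpaa_mbuf_free_bulk()
	 *           (caches ptov_off only when the flag is not set)
	 */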


* Re: [dpdk-dev] [PATCH v2] mempool/dpaa: optimize phy to virt conversion
  2018-01-17  8:51 ` [dpdk-dev] [PATCH v2] mempool/dpaa: optimize phy to virt conversion Hemant Agrawal
@ 2018-01-18 23:28   ` Thomas Monjalon
  0 siblings, 0 replies; 14+ messages in thread
From: Thomas Monjalon @ 2018-01-18 23:28 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev, olivier.matz

17/01/2018 09:51, Hemant Agrawal:
> If the allocation is from a single memzone, optimize
> the phy-virt address conversions.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
> v2: use register memory area instead of new flag in mempool

Applied, thanks


end of thread

Thread overview: 14+ messages
2017-12-06 12:31 [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones Hemant Agrawal
2017-12-06 12:31 ` [dpdk-dev] [PATCH 2/2] mempool/dpaa: optimize phy to virt conversion Hemant Agrawal
2017-12-19 10:24 ` [dpdk-dev] [PATCH 1/2] mempool: indicate the usages of multi memzones Olivier MATZ
2017-12-19 10:46   ` Hemant Agrawal
2017-12-19 11:02     ` Olivier MATZ
2017-12-19 13:08       ` Hemant Agrawal
2017-12-20 11:59         ` Hemant Agrawal
2017-12-22 13:59           ` Olivier MATZ
2017-12-22 16:18             ` Hemant Agrawal
2018-01-16 13:51               ` Olivier Matz
2018-01-17  7:49                 ` Hemant Agrawal
2018-01-05 10:52       ` santosh
2018-01-17  8:51 ` [dpdk-dev] [PATCH v2] mempool/dpaa: optimize phy to virt conversion Hemant Agrawal
2018-01-18 23:28   ` Thomas Monjalon
