* [dpdk-dev] [PATCH] mempool: Introduce _populate_mz_range api
@ 2017-01-20 14:20 santosh.shukla
2017-01-20 14:38 ` Jerin Jacob
2017-01-20 15:13 ` [dpdk-dev] [PATCH v2] " santosh.shukla
0 siblings, 2 replies; 7+ messages in thread
From: santosh.shukla @ 2017-01-20 14:20 UTC (permalink / raw)
To: olivier.matz, dev
Cc: thomas.monjalon, jerin.jacob, hemant.agrawal, Santosh Shukla
From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
HW pool managers, e.g. the Cavium SoC, need software to program the
start and end addresses of the pool. Currently there is no such API in
the external mempool framework.
So introduce a _populate_mz_range API that lets the HW pool manager
know the hugepage-mapped virtual start and end addresses.
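
For illustration only (not part of this patch), a HW mempool driver
could hook the new callback roughly as sketched below. All names are
hypothetical; note the callback runs before ops_alloc(), so the driver
cannot use mp->pool_data yet and has to stash the range elsewhere:

struct my_hw_mz_range {
	void *vaddr;       /* hugepage-mapped virtual start */
	phys_addr_t paddr; /* physical start */
	size_t len;        /* end = vaddr + len */
};

static struct my_hw_mz_range my_hw_range; /* single-pool example */

static void
my_hw_populate_mz_range(struct rte_mempool *mp,
		const struct rte_memzone *mz)
{
	RTE_SET_USED(mp);
	/* cache the range; program it into the HW at alloc time */
	my_hw_range.vaddr = mz->addr;
	my_hw_range.paddr = mz->phys_addr;
	my_hw_range.len = mz->len;
}

static const struct rte_mempool_ops my_hw_ops = {
	.name = "my_hardware",
	/* .alloc/.enqueue/.dequeue/.get_count omitted in this sketch */
	.populate_mz_range = my_hw_populate_mz_range,
};
MEMPOOL_REGISTER_OPS(my_hw_ops);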
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
lib/librte_mempool/rte_mempool.c | 4 ++++
lib/librte_mempool/rte_mempool.h | 22 ++++++++++++++++++++++
lib/librte_mempool/rte_mempool_ops.c | 17 +++++++++++++++++
lib/librte_mempool/rte_mempool_ring.c | 4 ++++
lib/librte_mempool/rte_mempool_stack.c | 3 ++-
5 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 1c2aed8..9a39f5c 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -568,6 +568,10 @@ static unsigned optimize_object_size(unsigned obj_size)
else
paddr = mz->phys_addr;
+	/* Populate mz range */
+	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0)
+		rte_mempool_ops_populate_mz_range(mp, mz);
+
if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
ret = rte_mempool_populate_phys(mp, mz->addr,
paddr, mz->len,
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index d0f5b27..3ae8aa8 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -387,6 +387,12 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
*/
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
+/**
+ * Set the memzone va/pa addr range and len in the external pool.
+ */
+typedef void (*rte_mempool_populate_mz_range_t)(struct rte_mempool *mp,
+ const struct rte_memzone *mz);
+
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -395,6 +401,8 @@ struct rte_mempool_ops {
rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
rte_mempool_get_count get_count; /**< Get qty of available objs. */
+ rte_mempool_populate_mz_range_t populate_mz_range; /**< set per pool
+ memzone info */
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
@@ -438,6 +446,20 @@ struct rte_mempool_ops_table {
}
/**
+ * @internal Wrapper for mempool_ops populate memzone's va/pa addr callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ *
+ * @param mz
+ * Pointer to the memory zone.
+ */
+void
+rte_mempool_ops_populate_mz_range(struct rte_mempool *mp,
+ const struct rte_memzone *mz);
+
+
+/**
* @internal Wrapper for mempool_ops alloc callback.
*
* @param mp
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index 5f24de2..ea79fc1 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -85,12 +85,29 @@ struct rte_mempool_ops_table rte_mempool_ops_table = {
ops->enqueue = h->enqueue;
ops->dequeue = h->dequeue;
ops->get_count = h->get_count;
+ ops->populate_mz_range = h->populate_mz_range;
rte_spinlock_unlock(&rte_mempool_ops_table.sl);
return ops_index;
}
+/*
+ * Wrapper to populate the mz's pa/va address range and length info to
+ * the external mempool. A HW mempool implementation can cache this
+ * information in its local data structure.
+ * Note: this API always gets called before ops_alloc().
+ */
+void
+rte_mempool_ops_populate_mz_range(struct rte_mempool *mp,
+		const struct rte_memzone *mz)
+{
+	struct rte_mempool_ops *ops;
+
+	ops = rte_mempool_get_ops(mp->ops_index);
+	return ops->populate_mz_range(mp, mz);
+}
+
/* wrapper to allocate an external mempool's private (pool) data. */
int
rte_mempool_ops_alloc(struct rte_mempool *mp)
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index b9aa64d..7d32384 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -126,6 +126,7 @@
.enqueue = common_ring_mp_enqueue,
.dequeue = common_ring_mc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_sp_sc = {
@@ -135,6 +136,7 @@
.enqueue = common_ring_sp_enqueue,
.dequeue = common_ring_sc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_mp_sc = {
@@ -144,6 +146,7 @@
.enqueue = common_ring_mp_enqueue,
.dequeue = common_ring_sc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_sp_mc = {
@@ -153,6 +156,7 @@
.enqueue = common_ring_sp_enqueue,
.dequeue = common_ring_mc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
MEMPOOL_REGISTER_OPS(ops_mp_mc);
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
index 5fd8af2..6b0b2bd 100644
--- a/lib/librte_mempool/rte_mempool_stack.c
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -141,7 +141,8 @@ struct rte_mempool_stack {
.free = stack_free,
.enqueue = stack_enqueue,
.dequeue = stack_dequeue,
- .get_count = stack_get_count
+ .get_count = stack_get_count,
+ .populate_mz_range = NULL
};
MEMPOOL_REGISTER_OPS(ops_stack);
--
1.7.9.5
* Re: [dpdk-dev] [PATCH] mempool: Introduce _populate_mz_range api
2017-01-20 14:20 [dpdk-dev] [PATCH] mempool: Introduce _populate_mz_range api santosh.shukla
@ 2017-01-20 14:38 ` Jerin Jacob
2017-01-20 15:13 ` [dpdk-dev] [PATCH v2] " santosh.shukla
1 sibling, 0 replies; 7+ messages in thread
From: Jerin Jacob @ 2017-01-20 14:38 UTC (permalink / raw)
To: santosh.shukla; +Cc: olivier.matz, dev, thomas.monjalon, hemant.agrawal
On Fri, Jan 20, 2017 at 07:50:17PM +0530, santosh.shukla@caviumnetworks.com wrote:
> From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
>
> +/*
> + * Wrapper to populate the mz's pa/va address range and length info to
> + * the external mempool. A HW mempool implementation can cache this
> + * information in its local data structure.
> + * Note: this API always gets called before ops_alloc().
> + */
> +void
> +rte_mempool_ops_populate_mz_range(struct rte_mempool *mp,
> +		const struct rte_memzone *mz)
> +{
> +	struct rte_mempool_ops *ops;
> +
> +	ops = rte_mempool_get_ops(mp->ops_index);
> +	return ops->populate_mz_range(mp, mz);
Check for NULL before calling the function pointer.
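
i.e. guard the call with something like the following (which is what
v2 below ends up doing):

	if (ops->populate_mz_range)
		ops->populate_mz_range(mp, mz);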
* [dpdk-dev] [PATCH v2] mempool: Introduce _populate_mz_range api
2017-01-20 14:20 [dpdk-dev] [PATCH] mempool: Introduce _populate_mz_range api santosh.shukla
2017-01-20 14:38 ` Jerin Jacob
@ 2017-01-20 15:13 ` santosh.shukla
2017-01-31 10:31 ` Olivier Matz
1 sibling, 1 reply; 7+ messages in thread
From: santosh.shukla @ 2017-01-20 15:13 UTC (permalink / raw)
To: olivier.matz, dev
Cc: thomas.monjalon, jerin.jacob, hemant.agrawal, Santosh Shukla
From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
HW pool managers, e.g. the Cavium SoC, need software to program the
start and end addresses of the pool. Currently there is no such API in
the external mempool framework.
So introduce a _populate_mz_range API that lets the HW pool manager
know the hugepage-mapped virtual start and end addresses.
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
v1-->v2:
- Added a NULL check for ops->populate_mz_range.
- Removed the return of ops->populate_mz_range(), as the function is void.
lib/librte_mempool/rte_mempool.c | 4 ++++
lib/librte_mempool/rte_mempool.h | 22 ++++++++++++++++++++++
lib/librte_mempool/rte_mempool_ops.c | 18 ++++++++++++++++++
lib/librte_mempool/rte_mempool_ring.c | 4 ++++
lib/librte_mempool/rte_mempool_stack.c | 3 ++-
5 files changed, 50 insertions(+), 1 deletion(-)
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 1c2aed8..9a39f5c 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -568,6 +568,10 @@ static unsigned optimize_object_size(unsigned obj_size)
else
paddr = mz->phys_addr;
+	/* Populate mz range */
+	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0)
+		rte_mempool_ops_populate_mz_range(mp, mz);
+
if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
ret = rte_mempool_populate_phys(mp, mz->addr,
paddr, mz->len,
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index d0f5b27..3ae8aa8 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -387,6 +387,12 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
*/
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
+/**
+ * Set the memzone va/pa addr range and len in the external pool.
+ */
+typedef void (*rte_mempool_populate_mz_range_t)(struct rte_mempool *mp,
+ const struct rte_memzone *mz);
+
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -395,6 +401,8 @@ struct rte_mempool_ops {
rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
rte_mempool_get_count get_count; /**< Get qty of available objs. */
+ rte_mempool_populate_mz_range_t populate_mz_range; /**< set per pool
+ memzone info */
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
@@ -438,6 +446,20 @@ struct rte_mempool_ops_table {
}
/**
+ * @internal Wrapper for mempool_ops populate memzone's va/pa addr callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ *
+ * @param mz
+ * Pointer to the memory zone.
+ */
+void
+rte_mempool_ops_populate_mz_range(struct rte_mempool *mp,
+ const struct rte_memzone *mz);
+
+
+/**
* @internal Wrapper for mempool_ops alloc callback.
*
* @param mp
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index 5f24de2..6c2bc7b 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -85,12 +85,30 @@ struct rte_mempool_ops_table rte_mempool_ops_table = {
ops->enqueue = h->enqueue;
ops->dequeue = h->dequeue;
ops->get_count = h->get_count;
+ ops->populate_mz_range = h->populate_mz_range;
rte_spinlock_unlock(&rte_mempool_ops_table.sl);
return ops_index;
}
+/*
+ * Wrapper to populate the mz's pa/va address range and length info to
+ * the external mempool. A HW mempool implementation can cache this
+ * information in its local data structure.
+ * Note: this API always gets called before ops_alloc().
+ */
+void
+rte_mempool_ops_populate_mz_range(struct rte_mempool *mp,
+		const struct rte_memzone *mz)
+{
+	struct rte_mempool_ops *ops;
+
+	ops = rte_mempool_get_ops(mp->ops_index);
+	if (ops->populate_mz_range)
+		ops->populate_mz_range(mp, mz);
+}
+
/* wrapper to allocate an external mempool's private (pool) data. */
int
rte_mempool_ops_alloc(struct rte_mempool *mp)
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index b9aa64d..7d32384 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -126,6 +126,7 @@
.enqueue = common_ring_mp_enqueue,
.dequeue = common_ring_mc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_sp_sc = {
@@ -135,6 +136,7 @@
.enqueue = common_ring_sp_enqueue,
.dequeue = common_ring_sc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_mp_sc = {
@@ -144,6 +146,7 @@
.enqueue = common_ring_mp_enqueue,
.dequeue = common_ring_sc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_sp_mc = {
@@ -153,6 +156,7 @@
.enqueue = common_ring_sp_enqueue,
.dequeue = common_ring_mc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
MEMPOOL_REGISTER_OPS(ops_mp_mc);
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
index 5fd8af2..6b0b2bd 100644
--- a/lib/librte_mempool/rte_mempool_stack.c
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -141,7 +141,8 @@ struct rte_mempool_stack {
.free = stack_free,
.enqueue = stack_enqueue,
.dequeue = stack_dequeue,
- .get_count = stack_get_count
+ .get_count = stack_get_count,
+ .populate_mz_range = NULL
};
MEMPOOL_REGISTER_OPS(ops_stack);
--
1.7.9.5
* Re: [dpdk-dev] [PATCH v2] mempool: Introduce _populate_mz_range api
2017-01-20 15:13 ` [dpdk-dev] [PATCH v2] " santosh.shukla
@ 2017-01-31 10:31 ` Olivier Matz
2017-01-31 14:32 ` Santosh Shukla
0 siblings, 1 reply; 7+ messages in thread
From: Olivier Matz @ 2017-01-31 10:31 UTC (permalink / raw)
To: santosh.shukla; +Cc: dev, thomas.monjalon, jerin.jacob, hemant.agrawal
Hi Santosh,
I guess this patch is targeted for 17.05, right?
Please see some other comments below.
On Fri, 20 Jan 2017 20:43:41 +0530, <santosh.shukla@caviumnetworks.com>
wrote:
> From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
>
> HW pool managers, e.g. the Cavium SoC, need software to program the
> start and end addresses of the pool. Currently there is no such API
> in the external mempool framework.
Today, the mempool objects are not necessarily contiguous in
virtual or physical memory. The only assumption that can be made is
that each object is contiguous (virtually and physically). If the flag
MEMPOOL_F_NO_PHYS_CONTIG is passed, each object is assured to be
contiguous virtually.
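
(As an aside — a sketch, not part of the patch — the memory chunks of
an existing mempool can be walked with rte_mempool_mem_iter(); each
chunk is contiguous on its own, but chunks need not be contiguous with
each other:)

static void
dump_chunk(struct rte_mempool *mp, void *opaque,
	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx)
{
	RTE_SET_USED(opaque);
	/* one line per chunk: virtual/physical start and length */
	printf("%s chunk %u: va=%p pa=0x%" PRIx64 " len=%zu\n",
	       mp->name, mem_idx, memhdr->addr,
	       (uint64_t)memhdr->phys_addr, memhdr->len);
}

/* usage: rte_mempool_mem_iter(mp, dump_chunk, NULL); */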
> So introduce a _populate_mz_range API that lets the HW pool manager
> know the hugepage-mapped virtual start and end addresses.
rte_mempool_ops_populate_mz_range() looks a bit long. What about
rte_mempool_ops_populate() instead?
> diff --git a/lib/librte_mempool/rte_mempool.c
> b/lib/librte_mempool/rte_mempool.c index 1c2aed8..9a39f5c 100644
> --- a/lib/librte_mempool/rte_mempool.c
> +++ b/lib/librte_mempool/rte_mempool.c
> @@ -568,6 +568,10 @@ static unsigned optimize_object_size(unsigned
> obj_size) else
> paddr = mz->phys_addr;
>
> + /* Populate mz range */
> + if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0)
> + rte_mempool_ops_populate_mz_range(mp, mz);
> +
> if (rte_eal_has_hugepages()
Given what I've said above, I think the populate() callback should be
in rte_mempool_populate_phys() instead of
rte_mempool_populate_default(). It would be called for each contiguous
zone.
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -387,6 +387,12 @@ typedef int (*rte_mempool_dequeue_t)(struct
> rte_mempool *mp, */
> typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool
> *mp); +/**
> + * Set the memzone va/pa addr range and len in the external pool.
> + */
> +typedef void (*rte_mempool_populate_mz_range_t)(struct rte_mempool
> *mp,
> + const struct rte_memzone *mz);
> +
And this API would be:
typedef void (*rte_mempool_populate_t)(struct rte_mempool *mp,
char *vaddr, phys_addr_t paddr, size_t len)
If your hw absolutely needs contiguous memory, a solution could be:
- add a new flag MEMPOOL_F_CONTIG (maybe a better name could be
found) saying that all the mempool objects must be contiguous.
- add the ops_populate() callback in rte_mempool_populate_phys(), as
suggested above
Then:
/* create an empty mempool */
rte_mempool_create_empty(...);
/* set the handler:
* in the ext handler, the mempool flags are updated with
* MEMPOOL_F_CONTIG
*/
rte_mempool_set_ops_byname(..., "my_hardware");
/* if MEMPOOL_F_CONTIG is set, all populate() functions should ensure
* that there is only one contiguous zone
*/
rte_mempool_populate_default(...);
Regards,
Olivier
* Re: [dpdk-dev] [PATCH v2] mempool: Introduce _populate_mz_range api
2017-01-31 10:31 ` Olivier Matz
@ 2017-01-31 14:32 ` Santosh Shukla
2017-02-06 17:01 ` Olivier Matz
0 siblings, 1 reply; 7+ messages in thread
From: Santosh Shukla @ 2017-01-31 14:32 UTC (permalink / raw)
To: Olivier Matz; +Cc: dev, thomas.monjalon, jerin.jacob, hemant.agrawal
Hi Olivier,
Reply inline.
On Tue, Jan 31, 2017 at 11:31:51AM +0100, Olivier Matz wrote:
> Hi Santosh,
>
> I guess this patch is targeted for 17.05, right?
>
Yes.
> Please see some other comments below.
>
> On Fri, 20 Jan 2017 20:43:41 +0530, <santosh.shukla@caviumnetworks.com>
> wrote:
> > From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
> >
> > HW pool managers, e.g. the Cavium SoC, need software to program the
> > start and end addresses of the pool. Currently there is no such API
> > in the external mempool framework.
>
> Today, the mempool objects are not necessarily contiguous in
> virtual or physical memory. The only assumption that can be made is
> that each object is contiguous (virtually and physically). If the flag
> MEMPOOL_F_NO_PHYS_CONTIG is passed, each object is assured to be
> contiguous virtually.
>
Some clarification: IIUC, the pa/va addresses within each hugepage (aka
mz) are contiguous, but there is no guarantee of contiguity across
hugepages (aka multiple mzs). Right?
If that is correct, then a pool whose start address is in one mz and
whose end address is in another mz is a problem scenario, correct? In
that case, ext-mempool drivers would be affected.
> > So introduce a _populate_mz_range API that lets the HW pool manager
> > know the hugepage-mapped virtual start and end addresses.
>
> rte_mempool_ops_populate_mz_range() looks a bit long. What about
> rte_mempool_ops_populate() instead?
Ok.
> > diff --git a/lib/librte_mempool/rte_mempool.c
> > b/lib/librte_mempool/rte_mempool.c index 1c2aed8..9a39f5c 100644
> > --- a/lib/librte_mempool/rte_mempool.c
> > +++ b/lib/librte_mempool/rte_mempool.c
> > @@ -568,6 +568,10 @@ static unsigned optimize_object_size(unsigned
> > obj_size) else
> > paddr = mz->phys_addr;
> >
> > + /* Populate mz range */
> > + if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0)
> > + rte_mempool_ops_populate_mz_range(mp, mz);
> > +
> > if (rte_eal_has_hugepages()
>
> Given what I've said above, I think the populate() callback should be
> in rte_mempool_populate_phys() instead of
> rte_mempool_populate_default(). It would be called for each contiguous
> zone.
>
Ok.
> > --- a/lib/librte_mempool/rte_mempool.h
> > +++ b/lib/librte_mempool/rte_mempool.h
> > @@ -387,6 +387,12 @@ typedef int (*rte_mempool_dequeue_t)(struct
> > rte_mempool *mp, */
> > typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool
> > *mp); +/**
> > + * Set the memzone va/pa addr range and len in the external pool.
> > + */
> > +typedef void (*rte_mempool_populate_mz_range_t)(struct rte_mempool
> > *mp,
> > + const struct rte_memzone *mz);
> > +
>
> And this API would be:
> typedef void (*rte_mempool_populate_t)(struct rte_mempool *mp,
> char *vaddr, phys_addr_t paddr, size_t len)
>
>
> If your hw absolutely needs contiguous memory, a solution could be:
>
> - add a new flag MEMPOOL_F_CONTIG (maybe a better name could be
> found) saying that all the mempool objects must be contiguous.
> - add the ops_populate() callback in rte_mempool_populate_phys(), as
> suggested above
>
> Then:
>
> /* create an empty mempool */
> rte_mempool_create_empty(...);
>
> /* set the handler:
> * in the ext handler, the mempool flags are updated with
> * MEMPOOL_F_CONTIG
> */
> rte_mempool_set_ops_byname(..., "my_hardware");
>
You mean the ext handler will set the mempool flag using the
'pool_config' param; something like
rte_mempool_ops_by_name(..., "my_hardware", MEMPOOL_F_CONTIG); ?
> /* if MEMPOOL_F_CONTIG is set, all populate() functions should ensure
> * that there is only one contiguous zone
> */
> rte_mempool_populate_default(...);
>
I am not too sure about the scope of MEMPOOL_F_CONTIG. How can the
MEMPOOL_F_CONTIG flag {set by the application/driver by calling
rte_mempool_create(..., flag)} make sure there is only one contiguous
zone? I'd like to understand this from you.
In my understanding:
rte_mempool_populate_default() will request a memzone based on the
mp->size value; and if mp->size is more than one hugepage_sz {i.e. one
mz request is not enough} then it will request more hugepages {i.e.
more mz requests}. So IIUC contiguity is not guaranteed.
> Regards,
> Olivier
* Re: [dpdk-dev] [PATCH v2] mempool: Introduce _populate_mz_range api
2017-01-31 14:32 ` Santosh Shukla
@ 2017-02-06 17:01 ` Olivier Matz
2017-02-07 4:00 ` Santosh Shukla
0 siblings, 1 reply; 7+ messages in thread
From: Olivier Matz @ 2017-02-06 17:01 UTC (permalink / raw)
To: Santosh Shukla; +Cc: dev, thomas.monjalon, jerin.jacob, hemant.agrawal
On Tue, 31 Jan 2017 20:02:26 +0530, Santosh Shukla
<santosh.shukla@caviumnetworks.com> wrote:
> Hi Olivier,
>
> Reply inline.
>
> On Tue, Jan 31, 2017 at 11:31:51AM +0100, Olivier Matz wrote:
> > Hi Santosh,
> >
> > I guess this patch is targeted for 17.05, right?
> >
>
> Yes.
>
> > Please see some other comments below.
> >
> > On Fri, 20 Jan 2017 20:43:41 +0530,
> > <santosh.shukla@caviumnetworks.com> wrote:
> > > From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
> > >
> > > HW pool managers, e.g. the Cavium SoC, need software to program
> > > the start and end addresses of the pool. Currently there is no
> > > such API in the external mempool framework.
> >
> > Today, the mempool objects are not necessarily contiguous in
> > virtual or physical memory. The only assumption that can be made is
> > that each object is contiguous (virtually and physically). If the
> > flag MEMPOOL_F_NO_PHYS_CONTIG is passed, each object is assured to
> > be contiguous virtually.
> >
>
> Some clarification: IIUC, the pa/va addresses within each hugepage
> (aka mz) are contiguous, but there is no guarantee of contiguity
> across hugepages (aka multiple mzs). Right?
>
> If that is correct, then a pool whose start address is in one mz and
> whose end address is in another mz is a problem scenario, correct? In
> that case, ext-mempool drivers would be affected.
Yes, that's globally correct, except that several hugepages may happen
to be physically contiguous (EAL will try, but it's not guaranteed).
>
> > > So introduce a _populate_mz_range API that lets the HW pool
> > > manager know the hugepage-mapped virtual start and end addresses.
> >
> > rte_mempool_ops_populate_mz_range() looks a bit long. What about
> > rte_mempool_ops_populate() instead?
>
> Ok.
>
> > > diff --git a/lib/librte_mempool/rte_mempool.c
> > > b/lib/librte_mempool/rte_mempool.c index 1c2aed8..9a39f5c 100644
> > > --- a/lib/librte_mempool/rte_mempool.c
> > > +++ b/lib/librte_mempool/rte_mempool.c
> > > @@ -568,6 +568,10 @@ static unsigned optimize_object_size(unsigned
> > > obj_size) else
> > > paddr = mz->phys_addr;
> > >
> > > + /* Populate mz range */
> > > + if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0)
> > > + rte_mempool_ops_populate_mz_range(mp,
> > > mz); +
> > > if (rte_eal_has_hugepages()
> >
> > Given what I've said above, I think the populate() callback should
> > be in rte_mempool_populate_phys() instead of
> > rte_mempool_populate_default(). It would be called for each
> > contiguous zone.
> >
>
> Ok.
>
> > > --- a/lib/librte_mempool/rte_mempool.h
> > > +++ b/lib/librte_mempool/rte_mempool.h
> > > @@ -387,6 +387,12 @@ typedef int (*rte_mempool_dequeue_t)(struct
> > > rte_mempool *mp, */
> > > typedef unsigned (*rte_mempool_get_count)(const struct
> > > rte_mempool *mp); +/**
> > > + * Set the memzone va/pa addr range and len in the external pool.
> > > + */
> > > +typedef void (*rte_mempool_populate_mz_range_t)(struct
> > > rte_mempool *mp,
> > > + const struct rte_memzone *mz);
> > > +
> >
> > And this API would be:
> > typedef void (*rte_mempool_populate_t)(struct rte_mempool *mp,
> > char *vaddr, phys_addr_t paddr, size_t len)
> >
> >
> > If your hw absolutely needs contiguous memory, a solution could be:
> >
> > - add a new flag MEMPOOL_F_CONTIG (maybe a better name could be
> > found) saying that all the mempool objects must be contiguous.
> > - add the ops_populate() callback in rte_mempool_populate_phys(), as
> > suggested above
> >
> > Then:
> >
> > /* create an empty mempool */
> > rte_mempool_create_empty(...);
> >
> > /* set the handler:
> > * in the ext handler, the mempool flags are updated with
> > * MEMPOOL_F_CONTIG
> > */
> > rte_mempool_set_ops_byname(..., "my_hardware");
> >
>
> You mean the ext handler will set the mempool flag using the
> 'pool_config' param; something like
> rte_mempool_ops_by_name(..., "my_hardware", MEMPOOL_F_CONTIG); ?
I don't mean changing the API of rte_mempool_set_ops_byname().
I was suggesting something like this:
static int your_handler_alloc(struct rte_mempool *mp)
{
	/* handler init */
	...

	mp->flags |= MEMPOOL_F_CONTIG;
	return 0;
}
And update the rte_mempool_populate_*() functions to honor this flag:
instead of segmenting, just fail if the pool cannot fit in one segment.
It won't really solve the issue, but at least it will be properly
detected, and the user can take steps to get more contiguous memory
(e.g. use larger hugepages, or allocate hugepages at boot time).
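
A minimal sketch of such a check (the exact placement and error code
are just one option):

	/* e.g. in rte_mempool_populate_phys(): refuse to add a second
	 * chunk when the handler requires one contiguous zone */
	if ((mp->flags & MEMPOOL_F_CONTIG) && mp->nb_mem_chunks > 0)
		return -ENOTSUP;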
>
> > /* if MEMPOOL_F_CONTIG is set, all populate() functions should
> > ensure
> > * that there is only one contiguous zone
> > */
> > rte_mempool_populate_default(...);
> >
>
> I am not too sure about the scope of MEMPOOL_F_CONTIG. How can the
> MEMPOOL_F_CONTIG flag {set by the application/driver by calling
> rte_mempool_create(..., flag)} make sure there is only one contiguous
> zone? I'd like to understand this from you.
As described above, there would be no change from the application's
point of view. The handler would set the mempool flag by itself to
change the behavior of the populate functions.
>
> In my understanding:
> rte_mempool_populate_default() will request a memzone based on the
> mp->size value; and if mp->size is more than one hugepage_sz {i.e.
> one mz request is not enough} then it will request more hugepages
> {i.e. more mz requests}. So IIUC contiguity is not guaranteed.
Yes, that's how it works today. As EAL cannot guarantee that the
hugepages are physically contiguous, it segments the mempool objects
across several memory zones.
Regards,
Olivier
* Re: [dpdk-dev] [PATCH v2] mempool: Introduce _populate_mz_range api
2017-02-06 17:01 ` Olivier Matz
@ 2017-02-07 4:00 ` Santosh Shukla
0 siblings, 0 replies; 7+ messages in thread
From: Santosh Shukla @ 2017-02-07 4:00 UTC (permalink / raw)
To: Olivier Matz; +Cc: dev, thomas.monjalon, jerin.jacob, hemant.agrawal
Hi Olivier,
On Mon, Feb 06, 2017 at 06:01:15PM +0100, Olivier Matz wrote:
> On Tue, 31 Jan 2017 20:02:26 +0530, Santosh Shukla
> <santosh.shukla@caviumnetworks.com> wrote:
> > Hi Olivier,
> >
> > Reply inline.
> >
> > On Tue, Jan 31, 2017 at 11:31:51AM +0100, Olivier Matz wrote:
> > > Hi Santosh,
> > >
> > > I guess this patch is targeted for 17.05, right?
> > >
> >
> > Yes.
> >
> > > Please see some other comments below.
> > >
> > > On Fri, 20 Jan 2017 20:43:41 +0530,
> > > <santosh.shukla@caviumnetworks.com> wrote:
> > > > From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
[snip..]
> > > > --- a/lib/librte_mempool/rte_mempool.h
> > > > +++ b/lib/librte_mempool/rte_mempool.h
> > > > @@ -387,6 +387,12 @@ typedef int (*rte_mempool_dequeue_t)(struct
> > > > rte_mempool *mp, */
> > > > typedef unsigned (*rte_mempool_get_count)(const struct
> > > > rte_mempool *mp); +/**
> > > > + * Set the memzone va/pa addr range and len in the external pool.
> > > > + */
> > > > +typedef void (*rte_mempool_populate_mz_range_t)(struct
> > > > rte_mempool *mp,
> > > > + const struct rte_memzone *mz);
> > > > +
> > >
> > > And this API would be:
> > > typedef void (*rte_mempool_populate_t)(struct rte_mempool *mp,
> > > char *vaddr, phys_addr_t paddr, size_t len)
> > >
> > >
> > > If your hw absolutely needs contiguous memory, a solution could
> > > be:
> > >
> > > - add a new flag MEMPOOL_F_CONTIG (maybe a better name could be
> > > found) saying that all the mempool objects must be contiguous.
> > > - add the ops_populate() callback in rte_mempool_populate_phys(), as
> > > suggested above
> > >
> > > Then:
> > >
> > > /* create an empty mempool */
> > > rte_mempool_create_empty(...);
> > >
> > > /* set the handler:
> > > * in the ext handler, the mempool flags are updated with
> > > * MEMPOOL_F_CONTIG
> > > */
> > > rte_mempool_set_ops_byname(..., "my_hardware");
> > >
> >
> > You mean the ext handler will set the mempool flag using the
> > 'pool_config' param; something like
> > rte_mempool_ops_by_name(..., "my_hardware", MEMPOOL_F_CONTIG); ?
>
> I don't mean changing the API of rte_mempool_set_ops_byname().
> I was suggesting something like this:
>
> static int your_handler_alloc(struct rte_mempool *mp)
> {
> 	/* handler init */
> 	...
>
> 	mp->flags |= MEMPOOL_F_CONTIG;
> 	return 0;
> }
>
Ok, will do in v3.
> And update the rte_mempool_populate_*() functions to honor this flag:
> instead of segmenting, just fail if the pool cannot fit in one
> segment. It won't really solve the issue, but at least it will be
> properly detected, and the user can take steps to get more contiguous
> memory (e.g. use larger hugepages, or allocate hugepages at boot
> time).
>
Agree.
Will take care in v3.
> >
> > > /* if MEMPOOL_F_CONTIG is set, all populate() functions should
> > > ensure
> > > * that there is only one contiguous zone
> > > */
> > > rte_mempool_populate_default(...);
> > >
> >
> > I am not too sure about the scope of MEMPOOL_F_CONTIG. How can the
> > MEMPOOL_F_CONTIG flag {set by the application/driver by calling
> > rte_mempool_create(..., flag)} make sure there is only one
> > contiguous zone? I'd like to understand this from you.
>
> As described above, there would be no change from the application's
> point of view. The handler would set the mempool flag by itself to
> change the behavior of the populate functions.
>
>
Right.
> >
> > In my understanding:
> > rte_mempool_populate_default() will request a memzone based on the
> > mp->size value; and if mp->size is more than one hugepage_sz {i.e.
> > one mz request is not enough} then it will request more hugepages
> > {i.e. more mz requests}. So IIUC contiguity is not guaranteed.
>
> Yes, that's how it works today. As EAL cannot guarantee that the
> hugepages are physically contiguous, it segments the mempool objects
> across several memory zones.
>
>
> Regards,
> Olivier
>
>
>
Regards,
Santosh