From: <santosh.shukla@caviumnetworks.com>
To: <olivier.matz@6wind.com>, <dev@dpdk.org>
Cc: <thomas.monjalon@6wind.com>, <jerin.jacob@caviumnetworks.com>,
<hemant.agrawal@nxp.com>,
Santosh Shukla <santosh.shukla@caviumnetworks.com>
Subject: [dpdk-dev] [PATCH] mempool: Introduce _populate_mz_range api
Date: Fri, 20 Jan 2017 19:50:17 +0530 [thread overview]
Message-ID: <1484922017-26030-1-git-send-email-santosh.shukla@caviumnetworks.com> (raw)
From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
HW pool managers, e.g. the Cavium SoC, need software to program the start and
end addresses of the pool. Currently there is no such API in ext-mempool.
So introduce a _populate_mz_range API, which lets the HW (pool manager)
know the hugepage-mapped virtual start and end addresses.
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
lib/librte_mempool/rte_mempool.c | 4 ++++
lib/librte_mempool/rte_mempool.h | 22 ++++++++++++++++++++++
lib/librte_mempool/rte_mempool_ops.c | 17 +++++++++++++++++
lib/librte_mempool/rte_mempool_ring.c | 4 ++++
lib/librte_mempool/rte_mempool_stack.c | 3 ++-
5 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 1c2aed8..9a39f5c 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -568,6 +568,10 @@ static unsigned optimize_object_size(unsigned obj_size)
else
paddr = mz->phys_addr;
+ /* Populate mz range */
+ if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0)
+ rte_mempool_ops_populate_mz_range(mp, mz);
+
if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
ret = rte_mempool_populate_phys(mp, mz->addr,
paddr, mz->len,
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index d0f5b27..3ae8aa8 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -387,6 +387,12 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
*/
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
+/**
+ * Set the memzone va/pa addr range and len in the external pool.
+ */
+typedef void (*rte_mempool_populate_mz_range_t)(struct rte_mempool *mp,
+ const struct rte_memzone *mz);
+
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -395,6 +401,8 @@ struct rte_mempool_ops {
rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
rte_mempool_get_count get_count; /**< Get qty of available objs. */
+ rte_mempool_populate_mz_range_t populate_mz_range; /**< set per pool
+ memzone info */
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
@@ -438,6 +446,20 @@ struct rte_mempool_ops_table {
}
/**
+ * @internal Wrapper for mempool_ops populate memzone's va/pa addr callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ *
+ * @param mz
+ * Pointer to the memory zone.
+ */
+void
+rte_mempool_ops_populate_mz_range(struct rte_mempool *mp,
+ const struct rte_memzone *mz);
+
+
+/**
* @internal Wrapper for mempool_ops alloc callback.
*
* @param mp
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index 5f24de2..ea79fc1 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -85,12 +85,29 @@ struct rte_mempool_ops_table rte_mempool_ops_table = {
ops->enqueue = h->enqueue;
ops->dequeue = h->dequeue;
ops->get_count = h->get_count;
+ ops->populate_mz_range = h->populate_mz_range;
rte_spinlock_unlock(&rte_mempool_ops_table.sl);
return ops_index;
}
+/*
+ * wrapper to populate the mz's pa/va address range and length info to an
+ * external mempool. A HW mempool implementation may cache this information
+ * in its local data structure.
+ * Note: this API always gets called before ops_alloc().
+ */
+void
+rte_mempool_ops_populate_mz_range(struct rte_mempool *mp,
+ const struct rte_memzone *mz)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->populate_mz_range(mp, mz);
+}
+
/* wrapper to allocate an external mempool's private (pool) data. */
int
rte_mempool_ops_alloc(struct rte_mempool *mp)
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
index b9aa64d..7d32384 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -126,6 +126,7 @@
.enqueue = common_ring_mp_enqueue,
.dequeue = common_ring_mc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_sp_sc = {
@@ -135,6 +136,7 @@
.enqueue = common_ring_sp_enqueue,
.dequeue = common_ring_sc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_mp_sc = {
@@ -144,6 +146,7 @@
.enqueue = common_ring_mp_enqueue,
.dequeue = common_ring_sc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
static const struct rte_mempool_ops ops_sp_mc = {
@@ -153,6 +156,7 @@
.enqueue = common_ring_sp_enqueue,
.dequeue = common_ring_mc_dequeue,
.get_count = common_ring_get_count,
+ .populate_mz_range = NULL,
};
MEMPOOL_REGISTER_OPS(ops_mp_mc);
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
index 5fd8af2..6b0b2bd 100644
--- a/lib/librte_mempool/rte_mempool_stack.c
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -141,7 +141,8 @@ struct rte_mempool_stack {
.free = stack_free,
.enqueue = stack_enqueue,
.dequeue = stack_dequeue,
- .get_count = stack_get_count
+ .get_count = stack_get_count,
+ .populate_mz_range = NULL
};
MEMPOOL_REGISTER_OPS(ops_stack);
--
1.7.9.5
next reply other threads:[~2017-01-20 14:21 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-01-20 14:20 santosh.shukla [this message]
2017-01-20 14:38 ` Jerin Jacob
2017-01-20 15:13 ` [dpdk-dev] [PATCH v2] " santosh.shukla
2017-01-31 10:31 ` Olivier Matz
2017-01-31 14:32 ` Santosh Shukla
2017-02-06 17:01 ` Olivier Matz
2017-02-07 4:00 ` Santosh Shukla
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1484922017-26030-1-git-send-email-santosh.shukla@caviumnetworks.com \
--to=santosh.shukla@caviumnetworks.com \
--cc=dev@dpdk.org \
--cc=hemant.agrawal@nxp.com \
--cc=jerin.jacob@caviumnetworks.com \
--cc=olivier.matz@6wind.com \
--cc=thomas.monjalon@6wind.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).