From: Olivier Matz <olivier.matz@6wind.com>
To: dev@dpdk.org
Date: Wed, 9 Mar 2016 17:19:32 +0100
Message-Id: <1457540381-20274-27-git-send-email-olivier.matz@6wind.com>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1457540381-20274-1-git-send-email-olivier.matz@6wind.com>
References: <1457540381-20274-1-git-send-email-olivier.matz@6wind.com>
Subject: [dpdk-dev] [RFC 26/35] mempool: introduce a function to create an empty mempool

Introduce a new function rte_mempool_create_empty() that allocates a
mempool that is not populated.

The functions rte_mempool_create() and rte_mempool_xmem_create() now
make use of it, making their code much easier to read. Currently, they
are the only users of rte_mempool_create_empty(), but the function will
be made public in the next commits.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
 lib/librte_mempool/rte_mempool.c | 185 ++++++++++++++++++++++-----------------
 1 file changed, 107 insertions(+), 78 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 4b74ffd..afb2992 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -320,30 +320,6 @@ rte_dom0_mempool_create(const char *name __rte_unused,
 }
 #endif
 
-/* create the mempool */
-struct rte_mempool *
-rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
-	unsigned cache_size, unsigned private_data_size,
-	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
-	rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
-	int socket_id, unsigned flags)
-{
-	if (rte_xen_dom0_supported())
-		return rte_dom0_mempool_create(name, n, elt_size,
-			cache_size, private_data_size,
-			mp_init, mp_init_arg,
-			obj_init, obj_init_arg,
-			socket_id, flags);
-	else
-		return rte_mempool_xmem_create(name, n, elt_size,
-			cache_size, private_data_size,
-			mp_init, mp_init_arg,
-			obj_init, obj_init_arg,
-			socket_id, flags,
-			NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
-			MEMPOOL_PG_SHIFT_MAX);
-}
-
 /* create the internal ring */
 static int
 rte_mempool_ring_create(struct rte_mempool *mp)
@@ -647,20 +623,11 @@ rte_mempool_free(struct rte_mempool *mp)
 	rte_memzone_free(mp->mz);
 }
 
-/*
- * Create the mempool over already allocated chunk of memory.
- * That external memory buffer can consists of physically disjoint pages.
- * Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
- * */
-struct rte_mempool *
-rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
-	unsigned cache_size, unsigned private_data_size,
-	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
-	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
-	int socket_id, unsigned flags, void *vaddr,
-	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+/* create an empty mempool */
+static struct rte_mempool *
+rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
+	unsigned cache_size, unsigned private_data_size,
+	int socket_id, unsigned flags)
 {
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	struct rte_mempool_list *mempool_list;
@@ -670,7 +637,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	size_t mempool_size;
 	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
 	struct rte_mempool_objsz objsz;
-	int ret;
 
 	/* compilation-time checks */
 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
@@ -693,18 +659,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 		return NULL;
 	}
 
-	/* check that we have both VA and PA */
-	if (vaddr != NULL && paddr == NULL) {
-		rte_errno = EINVAL;
-		return NULL;
-	}
-
-	/* Check that pg_num and pg_shift parameters are valid. */
-	if (pg_num == 0 || pg_shift > MEMPOOL_PG_SHIFT_MAX) {
-		rte_errno = EINVAL;
-		return NULL;
-	}
-
 	/* "no cache align" imply "no spread" */
 	if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
 		flags |= MEMPOOL_F_NO_SPREAD;
@@ -732,11 +686,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 		goto exit_unlock;
 	}
 
-	/*
-	 * If user provided an external memory buffer, then use it to
-	 * store mempool objects. Otherwise reserve a memzone that is large
-	 * enough to hold mempool header and metadata plus mempool objects.
-	 */
 	mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size);
 	mempool_size += private_data_size;
 	mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
@@ -748,12 +697,14 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 		goto exit_unlock;
 
 	/* init the mempool structure */
+	mp = mz->addr;
 	memset(mp, 0, sizeof(*mp));
 	snprintf(mp->name, sizeof(mp->name), "%s", name);
 	mp->mz = mz;
 	mp->socket_id = socket_id;
 	mp->size = n;
 	mp->flags = flags;
+	mp->socket_id = socket_id;
 	mp->elt_size = objsz.elt_size;
 	mp->header_size = objsz.header_size;
 	mp->trailer_size = objsz.trailer_size;
@@ -773,41 +724,119 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	mp->local_cache = (struct rte_mempool_cache *)
 		((char *)mp + MEMPOOL_HEADER_SIZE(mp, 0));
 
-	/* call the initializer */
+	te->data = mp;
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	TAILQ_INSERT_TAIL(mempool_list, te, next);
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+
+	return mp;
+
+exit_unlock:
+	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
+	rte_free(te);
+	rte_mempool_free(mp);
+	return NULL;
+}
+
+/* create the mempool */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+	unsigned cache_size, unsigned private_data_size,
+	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+	int socket_id, unsigned flags)
+{
+	struct rte_mempool *mp;
+
+	if (rte_xen_dom0_supported())
+		return rte_dom0_mempool_create(name, n, elt_size,
+			cache_size, private_data_size,
+			mp_init, mp_init_arg,
+			obj_init, obj_init_arg,
+			socket_id, flags);
+
+	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+		private_data_size, socket_id, flags);
+	if (mp == NULL)
+		return NULL;
+
+	/* call the mempool priv initializer */
 	if (mp_init)
 		mp_init(mp, mp_init_arg);
 
-	/* mempool elements allocated together with mempool */
+	if (rte_mempool_populate_default(mp) < 0)
+		goto fail;
+
+	/* call the object initializers */
+	if (obj_init)
+		rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
+
+	return mp;
+
+ fail:
+	rte_mempool_free(mp);
+	return NULL;
+}
+
+/*
+ * Create the mempool over already allocated chunk of memory.
+ * That external memory buffer can consists of physically disjoint pages.
+ * Setting vaddr to NULL, makes mempool to fallback to original behaviour
+ * and allocate space for mempool and it's elements as one big chunk of
+ * physically continuos memory.
+ * */ +struct rte_mempool * +rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags, void *vaddr, + const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift) +{ + struct rte_mempool *mp = NULL; + int ret; + + /* no virtual address supplied, use rte_mempool_create() */ if (vaddr == NULL) - ret = rte_mempool_populate_default(mp); - else - ret = rte_mempool_populate_phys_tab(mp, vaddr, - paddr, pg_num, pg_shift, NULL, NULL); - if (ret < 0) { - rte_errno = -ret; - goto exit_unlock; - } else if (ret != (int)mp->size) { + return rte_mempool_create(name, n, elt_size, cache_size, + private_data_size, mp_init, mp_init_arg, + obj_init, obj_init_arg, socket_id, flags); + + /* check that we have both VA and PA */ + if (paddr == NULL) { rte_errno = EINVAL; - goto exit_unlock; + return NULL; } - /* call the initializer */ - if (obj_init) - rte_mempool_obj_iter(mp, obj_init, obj_init_arg); + /* Check that pg_shift parameter is valid. */ + if (pg_shift > MEMPOOL_PG_SHIFT_MAX) { + rte_errno = EINVAL; + return NULL; + } - te->data = (void *) mp; + mp = rte_mempool_create_empty(name, n, elt_size, cache_size, + private_data_size, socket_id, flags); + if (mp == NULL) + return NULL; - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); - TAILQ_INSERT_TAIL(mempool_list, te, next); - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); - rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK); + /* call the mempool priv initializer */ + if (mp_init) + mp_init(mp, mp_init_arg); + + ret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift, + NULL, NULL); + if (ret < 0 || ret != (int)mp->size) + goto fail; + + /* call the object initializers */ + if (obj_init) + rte_mempool_obj_iter(mp, obj_init, obj_init_arg); return mp; -exit_unlock: - rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK); + fail: rte_mempool_free(mp); - return NULL; } -- 2.1.4