From: Olivier Matz <olivier.matz@6wind.com>
To: dev@dpdk.org
Date: Wed, 9 Mar 2016 17:19:25 +0100
Message-Id: <1457540381-20274-20-git-send-email-olivier.matz@6wind.com>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1457540381-20274-1-git-send-email-olivier.matz@6wind.com>
References: <1457540381-20274-1-git-send-email-olivier.matz@6wind.com>
Subject: [dpdk-dev] [RFC 19/35] mempool: introduce a free callback for memory chunks

Introduce a free callback that is passed to the populate* functions and
invoked when the mempool is freed. The callback is unused for now, but
the next commits will populate the mempool with several memory chunks,
so we need a way to free them properly on error.

Later in the series, we will also introduce a public rte_mempool_free()
and the ability for the user to populate a mempool with its own memory.
For that, we also need a free callback.
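For illustration, a callback matching the new
rte_mempool_memchunk_free_cb_t typedef could look as follows. This is a
hypothetical sketch of application code, not part of this patch: it
assumes the public populate API that only arrives later in the series,
and frees a chunk the application allocated with malloc(), whose base
address is handed back through the opaque pointer supplied at populate
time:

  #include <stdlib.h>

  #include <rte_common.h>
  #include <rte_mempool.h>

  /* hypothetical application callback: release a malloc()'d chunk;
   * 'opaque' carries the chunk base address given at populate time */
  static void
  app_memchunk_free(__rte_unused struct rte_mempool_memhdr *memhdr,
  	void *opaque)
  {
  	free(opaque);
  }

When rte_mempool_free_memchunks() drains mp->mem_list, it calls each
chunk's free_cb (when non-NULL) before freeing the memhdr itself; the
internal callers in this patch pass NULL and therefore skip that step.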
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
 lib/librte_mempool/rte_mempool.c | 27 ++++++++++++++++++++++-----
 lib/librte_mempool/rte_mempool.h |  8 ++++++++
 2 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 905387f..5bfe4cb 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -390,6 +390,15 @@ rte_mempool_ring_create(struct rte_mempool *mp)
 	return 0;
 }
 
+/* free a memchunk allocated with rte_memzone_reserve() */
+__rte_unused static void
+rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
+	void *opaque)
+{
+	const struct rte_memzone *mz = opaque;
+	rte_memzone_free(mz);
+}
+
 /* Free memory chunks used by a mempool. Objects must be in pool */
 static void
 rte_mempool_free_memchunks(struct rte_mempool *mp)
@@ -407,6 +416,8 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
 	while (!STAILQ_EMPTY(&mp->mem_list)) {
 		memhdr = STAILQ_FIRST(&mp->mem_list);
 		STAILQ_REMOVE_HEAD(&mp->mem_list, next);
+		if (memhdr->free_cb != NULL)
+			memhdr->free_cb(memhdr, memhdr->opaque);
 		rte_free(memhdr);
 		mp->nb_mem_chunks--;
 	}
@@ -417,7 +428,8 @@
  * on error.
  */
 static int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
-	phys_addr_t paddr, size_t len)
+	phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque)
 {
 	unsigned total_elt_sz;
 	unsigned i = 0;
@@ -438,6 +450,8 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	memhdr->addr = vaddr;
 	memhdr->phys_addr = paddr;
 	memhdr->len = len;
+	memhdr->free_cb = free_cb;
+	memhdr->opaque = opaque;
 
 	if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
 		off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
@@ -464,7 +478,8 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
  * number of objects added, or a negative value on error.
  */
 static int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
-	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
 {
 	uint32_t i, n;
 	int ret, cnt = 0;
@@ -482,11 +497,13 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 			;
 
 		ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
-			paddr[i], n * pg_sz);
+			paddr[i], n * pg_sz, free_cb, opaque);
 		if (ret < 0) {
 			rte_mempool_free_memchunks(mp);
 			return ret;
 		}
+		/* no need to call the free callback for next chunks */
+		free_cb = NULL;
 		cnt += ret;
 	}
 	return cnt;
@@ -668,12 +685,12 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 
 		ret = rte_mempool_populate_phys(mp, obj,
 			mp->phys_addr + ((char *)obj - (char *)mp),
-			objsz.total_size * n);
+			objsz.total_size * n, NULL, NULL);
 		if (ret != (int)mp->size)
 			goto exit_unlock;
 	} else {
 		ret = rte_mempool_populate_phys_tab(mp, vaddr,
-			paddr, pg_num, pg_shift);
+			paddr, pg_num, pg_shift, NULL, NULL);
 		if (ret != (int)mp->size)
 			goto exit_unlock;
 	}

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 184d40d..dacdf6c 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -187,6 +187,12 @@ struct rte_mempool_objtlr {
 STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
 
 /**
+ * Callback used to free a memory chunk
+ */
+typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
+	void *opaque);
+
+/**
  * Mempool objects memory header structure
  *
  * The memory chunks where objects are stored. Each chunk is virtually
@@ -198,6 +204,8 @@ struct rte_mempool_memhdr {
 	void *addr;            /**< Virtual address of the chunk */
 	phys_addr_t phys_addr; /**< Physical address of the chunk */
 	size_t len;            /**< length of the chunk */
+	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
+	void *opaque;          /**< Argument passed to the free callback */
 };
 
 /**
-- 
2.1.4