From mboxrd@z Thu Jan  1 00:00:00 1970
From: David Hunt <david.hunt@intel.com>
To: dev@dpdk.org
Cc: olivier.matz@6wind.com, viktorin@rehivetech.com,
	jerin.jacob@caviumnetworks.com, David Hunt <david.hunt@intel.com>
Date: Fri, 3 Jun 2016 15:58:25 +0100
Message-Id: <1464965906-108927-3-git-send-email-david.hunt@intel.com>
X-Mailer: git-send-email 2.5.5
In-Reply-To: <1464965906-108927-1-git-send-email-david.hunt@intel.com>
References: <1464874043-67467-1-git-send-email-david.hunt@intel.com>
 <1464965906-108927-1-git-send-email-david.hunt@intel.com>
Subject: [dpdk-dev] [PATCH v8 2/3] app/test: test external mempool manager

Use a minimal custom mempool ops implementation (an external mempool
handler) and check that the basic mempool autotests also pass with it.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool.c | 114 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 114 insertions(+)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index b586249..8526670 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -83,6 +83,97 @@ static rte_atomic32_t synchro;
 
 /*
+ * Simple example of custom mempool structure. Holds pointers to all the
+ * elements which are simply malloc'd in this example.
+ */
+struct custom_mempool {
+	rte_spinlock_t lock;
+	unsigned count;
+	unsigned size;
+	void *elts[];
+};
+
+/*
+ * Allocate and initialise the custom mempool structure: a header plus an
+ * array big enough to store a pointer to every object in the pool.
+ */
+static void *
+custom_mempool_alloc(struct rte_mempool *mp)
+{
+	struct custom_mempool *cm;
+
+	cm = rte_zmalloc("custom_mempool",
+		sizeof(struct custom_mempool) + mp->size * sizeof(void *), 0);
+	if (cm == NULL)
+		return NULL;
+
+	rte_spinlock_init(&cm->lock);
+	cm->count = 0;
+	cm->size = mp->size;
+	return cm;
+}
+
+static void
+custom_mempool_free(struct rte_mempool *mp)
+{
+	rte_free((void *)(mp->pool_data));
+}
+
+static int
+custom_mempool_put(struct rte_mempool *mp, void * const *obj_table, unsigned n)
+{
+	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
+	int ret = 0;
+
+	rte_spinlock_lock(&cm->lock);
+	if (cm->count + n > cm->size) {
+		ret = -ENOBUFS;
+	} else {
+		memcpy(&cm->elts[cm->count], obj_table, sizeof(void *) * n);
+		cm->count += n;
+	}
+	rte_spinlock_unlock(&cm->lock);
+	return ret;
+}
+
+
+static int
+custom_mempool_get(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
+	int ret = 0;
+
+	rte_spinlock_lock(&cm->lock);
+	if (n > cm->count) {
+		ret = -ENOENT;
+	} else {
+		cm->count -= n;
+		memcpy(obj_table, &cm->elts[cm->count], sizeof(void *) * n);
+	}
+	rte_spinlock_unlock(&cm->lock);
+	return ret;
+}
+
+static unsigned
+custom_mempool_get_count(const struct rte_mempool *mp)
+{
+	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
+
+	return cm->count;
+}
+
+static struct rte_mempool_ops mempool_ops_custom = {
+	.name = "custom_handler",
+	.alloc = custom_mempool_alloc,
+	.free = custom_mempool_free,
+	.put = custom_mempool_put,
+	.get = custom_mempool_get,
+	.get_count = custom_mempool_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(mempool_ops_custom);
+
+/*
  * save the object number in the first 4 bytes of object data. All
  * other bytes are set to 0.
  */
 static void
@@ -477,6 +568,7 @@ test_mempool(void)
 {
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
+	struct rte_mempool *mp_ext = NULL;
 
 	rte_atomic32_init(&synchro);
 
@@ -505,6 +597,27 @@ test_mempool(void)
 		goto err;
 	}
 
+	/* create a mempool with an external handler */
+	mp_ext = rte_mempool_create_empty("test_ext",
+		MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE,
+		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_ext == NULL) {
+		printf("cannot allocate mp_ext mempool\n");
+		goto err;
+	}
+	if (rte_mempool_set_ops_byname(mp_ext, "custom_handler") < 0) {
+		printf("cannot set custom handler\n");
+		goto err;
+	}
+	if (rte_mempool_populate_default(mp_ext) < 0) {
+		printf("cannot populate mp_ext mempool\n");
+		goto err;
+	}
+	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
+
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
 		printf("Cannot lookup mempool from its name\n");
@@ -545,6 +658,7 @@ test_mempool(void)
 
 err:
 	rte_mempool_free(mp_nocache);
 	rte_mempool_free(mp_cache);
+	rte_mempool_free(mp_ext);
 	return -1;
 }
 
-- 
2.5.5
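
For reference, below is a minimal usage sketch (not part of the patch) of how a
caller might bind a pool to the "custom_handler" ops registered above and move
one object through it. It mirrors the calls used in the test, including the
two-argument rte_mempool_set_ops_byname() of this series; the pool name
"example_pool", the element count/size and the helper example_custom_pool()
are illustrative assumptions only.

/*
 * Minimal sketch, not part of the patch: create a pool that uses the
 * "custom_handler" ops and exercise it through the generic mempool API.
 * Names and sizes here are placeholders chosen for illustration.
 */
#include <stdio.h>
#include <rte_mempool.h>

static struct rte_mempool *
example_custom_pool(void)
{
	struct rte_mempool *mp;
	void *obj;

	/* allocate an empty pool header, then attach the custom ops */
	mp = rte_mempool_create_empty("example_pool", 1024, 64,
				      0, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	if (rte_mempool_set_ops_byname(mp, "custom_handler") < 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	/* get/put now dispatch to custom_mempool_get()/custom_mempool_put() */
	if (rte_mempool_get(mp, &obj) == 0) {
		printf("got object %p from the custom handler\n", obj);
		rte_mempool_put(mp, obj);
	}

	return mp;
}

The point of the indirection is that the pool is populated and drained through
the normal rte_mempool calls; only the storage of free objects (the elts[]
array guarded by the spinlock in struct custom_mempool) is provided by the
external handler, which is exactly what the autotest above verifies.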