From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Date: Thu, 19 Feb 2015 17:44:19 +0000
Message-Id: <1424367859-28122-1-git-send-email-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [PATCH] ACL: use setjmp/longjmp to handle memory allocation failure at build phase

During the build phase, ACL does quite a lot of memory allocations for
relatively small temporary structures. In theory each such allocation can
fail, so we need to handle all these possible failures. That adds a lot of
extra checks and makes the code harder to read and follow. To simplify the
process, handle all such failures in one place. Note that all the memory
for temporary structures is freed in one go at the end of the build phase.
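For illustration only, the pattern this patch relies on boils down to the
standalone sketch below. The toy_pool/toy_alloc/toy_build names are
hypothetical stand-ins (not the DPDK API; the real code uses tb_mem_pool,
tb_alloc and the ACL build context): the allocator never returns NULL and
instead siglongjmp()s back to a single sigsetjmp() check at the start of
the build, so individual call sites need no NULL checks.

    /*
     * Illustration only: hypothetical toy_* names, not the DPDK API.
     * On allocation failure the allocator jumps straight back to the
     * single sigsetjmp() point at the start of the build phase.
     */
    #include <errno.h>
    #include <setjmp.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct toy_pool {
        sigjmp_buf fail;        /* jump target on allocation failure */
    };

    /* Never returns NULL: on failure it siglongjmp()s to pool->fail. */
    static void *
    toy_alloc(struct toy_pool *pool, size_t sz)
    {
        void *p = malloc(sz);

        if (p == NULL)
            siglongjmp(pool->fail, -ENOMEM);
        return p;
    }

    /* One check at the top replaces a NULL test after every allocation. */
    static int
    toy_build(struct toy_pool *pool)
    {
        int rc = sigsetjmp(pool->fail, 0);

        if (rc != 0)
            return rc;          /* some allocation inside the build failed */

        /* ... many allocations, none of which needs its own check ... */
        free(toy_alloc(pool, 64));
        return 0;
    }

    int
    main(void)
    {
        struct toy_pool pool;

        printf("build rc=%d\n", toy_build(&pool));
        return 0;
    }

Jumping out mid-build is safe here for the reason the commit message gives:
everything allocated from the pool is freed in one go afterwards, so nothing
is leaked by abandoning the half-built structures.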
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 lib/librte_acl/acl_bld.c | 42 +++++++++++++-----------------------------
 lib/librte_acl/tb_mem.c  |  8 ++++++--
 lib/librte_acl/tb_mem.h  |  3 +++
 3 files changed, 22 insertions(+), 31 deletions(-)

diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index 1fe79fb..ddb23fd 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -302,8 +302,6 @@ acl_add_ptr(struct acl_build_context *context,
 
 	/* add room for more pointers */
 	num_ptrs = node->max_ptrs + ACL_PTR_ALLOC;
 	ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
-	if (ptrs == NULL)
-		return -ENOMEM;
 
 	/* copy current points to new memory allocation */
 	if (node->ptrs != NULL) {
@@ -477,16 +475,12 @@ acl_dup_node(struct acl_build_context *context, struct rte_acl_node *node)
 	struct rte_acl_node *next;
 
 	next = acl_alloc_node(context, node->level);
-	if (next == NULL)
-		return NULL;
 
 	/* allocate the pointers */
 	if (node->num_ptrs > 0) {
 		next->ptrs = acl_build_alloc(context, node->max_ptrs,
 			sizeof(struct rte_acl_ptr_set));
-		if (next->ptrs == NULL)
-			return NULL;
 		next->max_ptrs = node->max_ptrs;
 	}
 
@@ -669,8 +663,6 @@ acl_merge_intersect(struct acl_build_context *context,
 
 	/* Duplicate A for intersection */
 	node_c = acl_dup_node(context, node_a->ptrs[idx_a].ptr);
-	if (node_c == NULL)
-		return -1;
 
 	/* Remove intersection from A */
 	acl_exclude_ptr(context, node_a, idx_a, intersect_ptr);
@@ -1328,14 +1320,10 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
 	rule = head;
 
 	trie = acl_alloc_node(context, 0);
-	if (trie == NULL)
-		return NULL;
 
 	while (rule != NULL) {
 
 		root = acl_alloc_node(context, 0);
-		if (root == NULL)
-			return NULL;
 
 		root->ref_count = 1;
 		end = root;
@@ -1419,10 +1407,9 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
 		 * Setup the results for this rule.
 		 * The result and priority of each category.
 		 */
-		if (end->mrt == NULL &&
-			(end->mrt = acl_build_alloc(context, 1,
-			sizeof(*end->mrt))) == NULL)
-			return NULL;
+		if (end->mrt == NULL)
+			end->mrt = acl_build_alloc(context, 1,
+				sizeof(*end->mrt));
 
 		for (m = 0; m < context->cfg.num_categories; m++) {
 			if (rule->f->data.category_mask & (1 << m)) {
@@ -1760,13 +1747,6 @@ acl_build_tries(struct acl_build_context *context,
 
 		/* Create a new copy of config for remaining rules. */
 		config = acl_build_alloc(context, 1, sizeof(*config));
-		if (config == NULL) {
-			RTE_LOG(ERR, ACL,
-				"New config allocation for %u-th "
-				"trie failed\n", num_tries);
-			return -ENOMEM;
-		}
-
 		memcpy(config, rule_sets[n]->config, sizeof(*config));
 
 		/* Make remaining rules use new config. */
@@ -1825,12 +1805,6 @@ acl_build_rules(struct acl_build_context *bcx)
 	sz = ofs + n * fn * sizeof(*wp);
 
 	br = tb_alloc(&bcx->pool, sz);
-	if (br == NULL) {
-		RTE_LOG(ERR, ACL, "ACL context %s: failed to create a copy "
-			"of %u build rules (%zu bytes)\n",
-			bcx->acx->name, n, sz);
-		return -ENOMEM;
-	}
 
 	wp = (uint32_t *)((uintptr_t)br + ofs);
 	num = 0;
@@ -1895,6 +1869,16 @@ acl_bld(struct acl_build_context *bcx, struct rte_acl_ctx *ctx,
 	bcx->category_mask = LEN2MASK(bcx->cfg.num_categories);
 	bcx->node_max = node_max;
 
+	rc = sigsetjmp(bcx->pool.fail, 0);
+
+	/* build phase runs out of memory. */
+	if (rc != 0) {
+		RTE_LOG(ERR, ACL,
+			"ACL context: %s, %s() failed with error code: %d\n",
+			bcx->acx->name, __func__, rc);
+		return rc;
+	}
+
 	/* Create a build rules copy. */
 	rc = acl_build_rules(bcx);
 	if (rc != 0)
diff --git a/lib/librte_acl/tb_mem.c b/lib/librte_acl/tb_mem.c
index fdf3080..157e608 100644
--- a/lib/librte_acl/tb_mem.c
+++ b/lib/librte_acl/tb_mem.c
@@ -37,6 +37,11 @@
  * Memory management routines for temporary memory.
  * That memory is used only during build phase and is released after
  * build is finished.
+ * Note that tb_pool()/tb_alloc() are not supposed to return NULL.
+ * Instead, on failure to allocate memory, they do siglongjmp(pool->fail).
+ * It is the responsibility of the caller to save the proper
+ * context/environment in pool->fail before calling tb_alloc()
+ * for the given pool for the first time.
  */
 
 static struct tb_mem_block *
@@ -51,6 +56,7 @@ tb_pool(struct tb_mem_pool *pool, size_t sz)
 	if (block == NULL) {
 		RTE_LOG(ERR, MALLOC, "%s(%zu)\n failed, currently allocated "
 			"by pool: %zu bytes\n", __func__, sz, pool->alloc);
+		siglongjmp(pool->fail, -ENOMEM);
 		return NULL;
 	}
 
@@ -81,8 +87,6 @@ tb_alloc(struct tb_mem_pool *pool, size_t size)
 	if (block == NULL || block->size < size) {
 		new_sz = (size > pool->min_alloc) ? size : pool->min_alloc;
 		block = tb_pool(pool, new_sz);
-		if (block == NULL)
-			return NULL;
 	}
 	ptr = block->mem;
 	block->size -= size;
diff --git a/lib/librte_acl/tb_mem.h b/lib/librte_acl/tb_mem.h
index a8dae94..ca7af96 100644
--- a/lib/librte_acl/tb_mem.h
+++ b/lib/librte_acl/tb_mem.h
@@ -48,6 +48,7 @@ extern "C" {
 #endif
 
 #include <rte_acl_osdep.h>
+#include <setjmp.h>
 
 struct tb_mem_block {
 	struct tb_mem_block *next;
@@ -61,6 +62,8 @@ struct tb_mem_pool {
 	size_t alignment;
 	size_t min_alloc;
 	size_t alloc;
+	/* jump target in case of memory allocation failure. */
+	sigjmp_buf fail;
 };
 
 void *tb_alloc(struct tb_mem_pool *pool, size_t size);
-- 
1.8.5.3