* [dpdk-dev] [PATCH 1/2] Fix checkpatch errors in librte_acl
  2014-12-25 15:31 [dpdk-dev] [PATCH 0/2] Fix checkpatch errors Ravi Kerur
@ 2014-12-25 15:31 ` Ravi Kerur
  2015-01-05 12:08   ` Bruce Richardson
  2014-12-25 15:31 ` [dpdk-dev] [PATCH 2/2] Fix checkpatch errors in librte_mempool Ravi Kerur
  2015-01-05 12:11 ` [dpdk-dev] [PATCH 0/2] Fix checkpatch errors Bruce Richardson
  2 siblings, 1 reply; 5+ messages in thread
From: Ravi Kerur @ 2014-12-25 15:31 UTC (permalink / raw)
  To: dev
Fix checkpatch warnings and errors in lib/librte_acl. checkpatch
is run as follows
scripts/checkpatch.pl --no-tree --file <file_name>
The following warnings are treated as false positives:
1. WARNING: quoted string split across lines
2. WARNING: do not add new typedefs
3. WARNING: __aligned(size) is preferred over __attribute__((aligned(size)))
Signed-off-by: Ravi Kerur <rkerur@gmail.com>
---
 lib/librte_acl/acl_bld.c             | 192 +++++++++++++++++++----------------
 lib/librte_acl/rte_acl.c             |   3 +-
 lib/librte_acl/rte_acl_osdep_alone.h |   3 +-
 lib/librte_acl/tb_mem.c              |   5 +-
 4 files changed, 109 insertions(+), 94 deletions(-)
diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index d6e0c45..1f60411 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -773,11 +773,13 @@ acl_merge(struct acl_build_context *context,
 		for (n = 0; n < ptrs_a; n++) {
 			for (m = 0; m < ptrs_b; m++) {
 
+				uint32_t acl_intsct_type, num_cats;
+
 				if (node_a->ptrs[n].ptr == NULL ||
-						node_b->ptrs[m].ptr == NULL ||
-						node_a->ptrs[n].ptr ==
-						node_b->ptrs[m].ptr)
-						continue;
+					node_b->ptrs[m].ptr == NULL ||
+					node_a->ptrs[n].ptr ==
+					node_b->ptrs[m].ptr)
+					continue;
 
 				intersect_type = acl_intersect_type(
 					&node_a->ptrs[n].values,
@@ -785,35 +787,38 @@ acl_merge(struct acl_build_context *context,
 					&intersect_ptr);
 
 				/* If this node is not a 'match' node */
-				if ((intersect_type & ACL_INTERSECT) &&
-					(context->cfg.num_categories != 1 ||
-					!(node_a->ptrs[n].ptr->match_flag))) {
-
-					/*
-					 * next merge is a 'move' pointer,
-					 * if this one is and B is a
-					 * subset of the intersection.
-					 */
-					next_move = move &&
-						(intersect_type &
-						ACL_INTERSECT_B) == 0;
-
-					if (a_subset && b_full) {
-						rc = acl_merge(context,
-							node_a->ptrs[n].ptr,
-							node_b->ptrs[m].ptr,
-							next_move,
-							1, level + 1);
-						if (rc != 0)
-							return rc;
-					} else {
-						rc = acl_merge_intersect(
-							context, node_a, n,
-							node_b, m, next_move,
-							level, &intersect_ptr);
-						if (rc != 0)
-							return rc;
-					}
+				acl_intsct_type =
+					intersect_type & ACL_INTERSECT;
+				num_cats = (context->cfg.num_categories != 1 ||
+					!(node_a->ptrs[n].ptr->match_flag));
+
+				if (!(acl_intsct_type && num_cats))
+					continue;
+
+				/*
+				 * next merge is a 'move' pointer,
+				 * if this one is and B is a
+				 * subset of the intersection.
+				 */
+				next_move = move &&
+					(intersect_type &
+					ACL_INTERSECT_B) == 0;
+
+				if (a_subset && b_full) {
+					rc = acl_merge(context,
+						node_a->ptrs[n].ptr,
+						node_b->ptrs[m].ptr,
+						next_move,
+						1, level + 1);
+					if (rc != 0)
+						return rc;
+				} else {
+					rc = acl_merge_intersect(
+						context, node_a, n,
+						node_b, m, next_move,
+						level, &intersect_ptr);
+					if (rc != 0)
+						return rc;
 				}
 			}
 		}
@@ -1099,52 +1104,52 @@ acl_merge_trie(struct acl_build_context *context,
 					&node_b->ptrs[m].values,
 					&child_intersect);
 
-				if ((child_intersect_type & ACL_INTERSECT) !=
-						0) {
-					if (acl_merge_trie(context,
-							node_c->ptrs[n].ptr,
-							node_b->ptrs[m].ptr,
-							level + 1, subtree_id,
-							&child_node_c))
-						return 1;
-
-					if (child_node_c != NULL &&
-							child_node_c !=
-							node_c->ptrs[n].ptr) {
-
-						node_b_refs++;
-
-						/*
-						 * Added link from C to
-						 * child_C for all transitions
-						 * in the intersection.
-						 */
-						acl_add_ptr(context, node_c,
-							child_node_c,
-							&child_intersect);
-
-						/*
-						 * inc refs if pointer is not
-						 * to node b.
-						 */
-						node_a_refs += (child_node_c !=
-							node_b->ptrs[m].ptr);
-
-						/*
-						 * Remove intersection from C
-						 * pointer.
-						 */
-						if (!acl_exclude(
-							&node_c->ptrs[n].values,
-							&node_c->ptrs[n].values,
-							&child_intersect)) {
-							acl_deref_ptr(context,
-								node_c, n);
-							node_c->ptrs[n].ptr =
-								NULL;
-							node_a_refs--;
-						}
-					}
+				if ((child_intersect_type & ACL_INTERSECT) ==
+						0)
+					continue;
+
+				if (acl_merge_trie(context,
+						node_c->ptrs[n].ptr,
+						node_b->ptrs[m].ptr,
+						level + 1, subtree_id,
+						&child_node_c))
+					return 1;
+
+				if (!(child_node_c != NULL &&
+					child_node_c !=
+					node_c->ptrs[n].ptr))
+					continue;
+
+				node_b_refs++;
+
+				/*
+				 * Added link from C to
+				 * child_C for all transitions
+				 * in the intersection.
+				 */
+				acl_add_ptr(context, node_c,
+					child_node_c,
+					&child_intersect);
+
+				/*
+				 * inc refs if pointer is not
+				 * to node b.
+				 */
+				node_a_refs += (child_node_c !=
+						node_b->ptrs[m].ptr);
+
+				/*
+				 * Remove intersection from C
+				 * pointer.
+				 */
+				if (!acl_exclude(
+					&node_c->ptrs[n].values,
+					&node_c->ptrs[n].values,
+					&child_intersect)) {
+					acl_deref_ptr(context,
+						node_c, n);
+					node_c->ptrs[n].ptr = NULL;
+					node_a_refs--;
 				}
 			}
 		}
@@ -1419,9 +1424,11 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
 		 * Setup the results for this rule.
 		 * The result and priority of each category.
 		 */
-		if (end->mrt == NULL &&
-				(end->mrt = acl_build_alloc(context, 1,
-				sizeof(*end->mrt))) == NULL)
+		if (end->mrt == NULL)
+			end->mrt = acl_build_alloc(context, 1,
+					sizeof(*end->mrt));
+
+		if (end->mrt == NULL)
 			return NULL;
 
 		for (m = 0; m < context->cfg.num_categories; m++) {
@@ -1806,6 +1813,7 @@ acl_build_tries(struct acl_build_context *context,
 			next = rule->next;
 			for (m = 0; m < config->num_fields; m++) {
 				int x = config->defs[m].field_index;
+
 				if (rule->wildness[x] < wild_limit[m]) {
 					move = 0;
 					break;
@@ -1983,20 +1991,24 @@ rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
 		rc = -EINVAL;
 
 	/* build internal trie representation. */
-	} else if ((rc = acl_build_tries(&bcx, bcx.build_rules)) == 0) {
+	} else {
+		rc = acl_build_tries(&bcx, bcx.build_rules);
 
-		/* allocate and fill run-time  structures. */
-		rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
+		if (rc == 0) {
+
+			/* allocate and fill run-time  structures. */
+			rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
 				bcx.num_tries, bcx.cfg.num_categories,
 				RTE_ACL_IPV4VLAN_NUM * RTE_DIM(bcx.tries),
 				bcx.num_build_rules);
-		if (rc == 0) {
+			if (rc == 0) {
 
-			/* set data indexes. */
-			acl_set_data_indexes(ctx);
+				/* set data indexes. */
+				acl_set_data_indexes(ctx);
 
-			/* copy in build config. */
-			ctx->config = *cfg;
+				/* copy in build config. */
+				ctx->config = *cfg;
+			}
 		}
 	}
 
diff --git a/lib/librte_acl/rte_acl.c b/lib/librte_acl/rte_acl.c
index 547e6da..6cd0ca9 100644
--- a/lib/librte_acl/rte_acl.c
+++ b/lib/librte_acl/rte_acl.c
@@ -203,7 +203,8 @@ rte_acl_create(const struct rte_acl_param *param)
 			goto exit;
 		}
 
-		ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE, param->socket_id);
+		ctx = rte_zmalloc_socket(name, sz,
+				RTE_CACHE_LINE_SIZE, param->socket_id);
 
 		if (ctx == NULL) {
 			RTE_LOG(ERR, ACL,
diff --git a/lib/librte_acl/rte_acl_osdep_alone.h b/lib/librte_acl/rte_acl_osdep_alone.h
index a84b6f9..c70dfb0 100644
--- a/lib/librte_acl/rte_acl_osdep_alone.h
+++ b/lib/librte_acl/rte_acl_osdep_alone.h
@@ -186,7 +186,8 @@ rte_rdtsc(void)
 /**
  * Force alignment to cache line.
  */
-#define	__rte_cache_aligned	__attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))
+#define	__rte_cache_aligned	\
+		__attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))
 
 
 /*
diff --git a/lib/librte_acl/tb_mem.c b/lib/librte_acl/tb_mem.c
index fdf3080..eba1723 100644
--- a/lib/librte_acl/tb_mem.c
+++ b/lib/librte_acl/tb_mem.c
@@ -49,8 +49,9 @@ tb_pool(struct tb_mem_pool *pool, size_t sz)
 	size = sz + pool->alignment - 1;
 	block = calloc(1, size + sizeof(*pool->block));
 	if (block == NULL) {
-		RTE_LOG(ERR, MALLOC, "%s(%zu)\n failed, currently allocated "
-			"by pool: %zu bytes\n", __func__, sz, pool->alloc);
+		RTE_LOG(ERR, MALLOC,
+		"%s(%zu)\n failed, currently allocated by pool: %zu bytes\n",
+		 __func__, sz, pool->alloc);
 		return NULL;
 	}
 
-- 
1.9.1
^ permalink raw reply	[flat|nested] 5+ messages in thread* [dpdk-dev] [PATCH 2/2] Fix checkpatch errors in librte_mempool
  2014-12-25 15:31 [dpdk-dev] [PATCH 0/2] Fix checkpatch errors Ravi Kerur
  2014-12-25 15:31 ` [dpdk-dev] [PATCH 1/2] Fix checkpatch errors in librte_acl Ravi Kerur
@ 2014-12-25 15:31 ` Ravi Kerur
  2015-01-05 12:11 ` [dpdk-dev] [PATCH 0/2] Fix checkpatch errors Bruce Richardson
  2 siblings, 0 replies; 5+ messages in thread
From: Ravi Kerur @ 2014-12-25 15:31 UTC (permalink / raw)
  To: dev
Fix checkpatch warnings and errors in lib/librte_mempool. checkpatch
is run as follows
scripts/checkpatch.pl --no-tree --file <file_name>
The following warning is treated as a false positive:
1. WARNING: quoted string split across lines
Signed-off-by: Ravi Kerur <rkerur@gmail.com>
---
 lib/librte_mempool/rte_dom0_mempool.c | 41 +++++++++++-----------
 lib/librte_mempool/rte_mempool.c      | 64 +++++++++++++++++++++--------------
 lib/librte_mempool/rte_mempool.h      | 58 ++++++++++++++++++-------------
 3 files changed, 94 insertions(+), 69 deletions(-)
diff --git a/lib/librte_mempool/rte_dom0_mempool.c b/lib/librte_mempool/rte_dom0_mempool.c
index 9ec68fb..1545436 100644
--- a/lib/librte_mempool/rte_dom0_mempool.c
+++ b/lib/librte_mempool/rte_dom0_mempool.c
@@ -62,30 +62,31 @@
 
 static void
 get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num,
-            uint32_t pg_sz, uint32_t memseg_id)
+		uint32_t pg_sz, uint32_t memseg_id)
 {
-    uint32_t i;
-    uint64_t virt_addr, mfn_id;
-    struct rte_mem_config *mcfg;
-    uint32_t page_size = getpagesize();
-
-    /* get pointer to global configuration */
-    mcfg = rte_eal_get_configuration()->mem_config;
-    virt_addr =(uintptr_t) mcfg->memseg[memseg_id].addr;
-
-    for (i = 0; i != pg_num; i++) {
-        mfn_id = ((uintptr_t)va + i * pg_sz - virt_addr) / RTE_PGSIZE_2M;
-        pa[i] = mcfg->memseg[memseg_id].mfn[mfn_id] * page_size;
-    }
+	uint32_t i;
+	uint64_t virt_addr, mfn_id;
+	struct rte_mem_config *mcfg;
+	uint32_t page_size = getpagesize();
+
+	/* get pointer to global configuration */
+	mcfg = rte_eal_get_configuration()->mem_config;
+	virt_addr = (uintptr_t) mcfg->memseg[memseg_id].addr;
+
+	for (i = 0; i != pg_num; i++) {
+		mfn_id =
+		((uintptr_t)va + i * pg_sz - virt_addr) / RTE_PGSIZE_2M;
+		pa[i] = mcfg->memseg[memseg_id].mfn[mfn_id] * page_size;
+	}
 }
 
 /* create the mempool for supporting Dom0 */
 struct rte_mempool *
 rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
-           unsigned cache_size, unsigned private_data_size,
-           rte_mempool_ctor_t *mp_init, void *mp_init_arg,
-           rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
-           int socket_id, unsigned flags)
+		unsigned cache_size, unsigned private_data_size,
+		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+		rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+		int socket_id, unsigned flags)
 {
 	struct rte_mempool *mp = NULL;
 	phys_addr_t *pa;
@@ -107,7 +108,7 @@ rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
 	pg_num = sz >> pg_shift;
 
 	/* extract physical mappings of the allocated memory. */
-	pa = calloc(pg_num, sizeof (*pa));
+	pa = calloc(pg_num, sizeof(*pa));
 	if (pa == NULL)
 		return mp;
 
@@ -130,5 +131,5 @@ rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
 
 	free(pa);
 
-	return (mp);
+	return mp;
 }
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 4cf6c25..e1374c4 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -196,7 +196,7 @@ rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
 		}
 	}
 
-	return (i);
+	return i;
 }
 
 /*
@@ -280,18 +280,20 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	 */
 	if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
 		unsigned new_size;
+
 		new_size = optimize_object_size(sz->header_size + sz->elt_size +
 			sz->trailer_size);
 		sz->trailer_size = new_size - sz->header_size - sz->elt_size;
 	}
 
-	if (! rte_eal_has_hugepages()) {
+	if (!rte_eal_has_hugepages()) {
 		/*
 		 * compute trailer size so that pool elements fit exactly in
 		 * a standard page
 		 */
 		int page_size = getpagesize();
 		int new_size = page_size - sz->header_size - sz->elt_size;
+
 		if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
 			printf("When hugepages are disabled, pool objects "
 			       "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
@@ -305,7 +307,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	/* this is the size of an object, including header and trailer */
 	sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
 
-	return (sz->total_size);
+	return sz->total_size;
 }
 
 
@@ -319,14 +321,16 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
 
 	pg_sz = (size_t)1 << pg_shift;
 
-	if ((n = pg_sz / elt_sz) > 0) {
+	n = pg_sz / elt_sz;
+
+	if (n > 0) {
 		pg_num = (elt_num + n - 1) / n;
 		sz = pg_num << pg_shift;
 	} else {
 		sz = RTE_ALIGN_CEIL(elt_sz, pg_sz) * elt_num;
 	}
 
-	return (sz);
+	return sz;
 }
 
 /*
@@ -335,9 +339,9 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
  */
 static void
 mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
-        __rte_unused uint32_t idx)
+			__rte_unused uint32_t idx)
 {
-        *(uintptr_t *)arg = (uintptr_t)end;
+	*(uintptr_t *)arg = (uintptr_t)end;
 }
 
 ssize_t
@@ -352,15 +356,16 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
 	va = (uintptr_t)vaddr;
 	uv = va;
 
-	if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
+	n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
 			paddr, pg_num, pg_shift, mempool_lelem_iter,
-			&uv)) != elt_num) {
+			&uv);
+
+	if (n != elt_num)
 		return (-n);
-	}
 
 	uv = RTE_ALIGN_CEIL(uv, pg_sz);
 	usz = uv - va;
-	return (usz);
+	return usz;
 }
 
 /* create the mempool */
@@ -491,16 +496,16 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	private_data_size = (private_data_size +
 			     RTE_CACHE_LINE_MASK) & (~RTE_CACHE_LINE_MASK);
 
-	if (! rte_eal_has_hugepages()) {
+	if (!rte_eal_has_hugepages()) {
 		/*
 		 * expand private data size to a whole page, so that the
 		 * first pool element will start on a new standard page
 		 */
 		int head = sizeof(struct rte_mempool);
 		int new_size = (private_data_size + head) % page_size;
-		if (new_size) {
+
+		if (new_size)
 			private_data_size += page_size - new_size;
-		}
 	}
 
 	/* try to allocate tailq entry */
@@ -519,7 +524,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	if (vaddr == NULL)
 		mempool_size += (size_t)objsz.total_size * n;
 
-	if (! rte_eal_has_hugepages()) {
+	if (!rte_eal_has_hugepages()) {
 		/*
 		 * we want the memory pool to start on a page boundary,
 		 * because pool elements crossing page boundaries would
@@ -542,15 +547,16 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	}
 
 	if (rte_eal_has_hugepages()) {
-		startaddr = (void*)mz->addr;
+		startaddr = (void *)mz->addr;
 	} else {
 		/* align memory pool start address on a page boundary */
 		unsigned long addr = (unsigned long)mz->addr;
+
 		if (addr & (page_size - 1)) {
 			addr += page_size;
 			addr &= ~(page_size - 1);
 		}
-		startaddr = (void*)addr;
+		startaddr = (void *)addr;
 	}
 
 	/* init the mempool structure */
@@ -587,7 +593,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	/* mempool elements in a separate chunk of memory. */
 	} else {
 		mp->elt_va_start = (uintptr_t)vaddr;
-		memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num);
+		memcpy(mp->elt_pa, paddr, sizeof(mp->elt_pa[0]) * pg_num);
 	}
 
 	mp->elt_va_end = mp->elt_va_start;
@@ -619,6 +625,7 @@ rte_mempool_count(const struct rte_mempool *mp)
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
 	{
 		unsigned lcore_id;
+
 		if (mp->cache_size == 0)
 			return count;
 
@@ -720,7 +727,7 @@ mempool_audit_cookies(const struct rte_mempool *mp)
 #pragma GCC diagnostic error "-Wcast-qual"
 #endif
 #else
-#define mempool_audit_cookies(mp) do {} while(0)
+#define mempool_audit_cookies(mp) do {} while (0)
 #endif
 
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
@@ -730,6 +737,7 @@ mempool_audit_cache(const struct rte_mempool *mp)
 {
 	/* check cache size consistency */
 	unsigned lcore_id;
+
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 		if (mp->local_cache[lcore_id].len > mp->cache_flushthresh) {
 			RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
@@ -739,7 +747,7 @@ mempool_audit_cache(const struct rte_mempool *mp)
 	}
 }
 #else
-#define mempool_audit_cache(mp) do {} while(0)
+#define mempool_audit_cache(mp) do {} while (0)
 #endif
 
 
@@ -831,8 +839,9 @@ rte_mempool_list_dump(FILE *f)
 	struct rte_tailq_entry *te;
 	struct rte_mempool_list *mempool_list;
 
-	if ((mempool_list =
-	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+	mempool_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
+				rte_mempool_list);
+	if (mempool_list == NULL) {
 		rte_errno = E_RTE_NO_TAILQ;
 		return;
 	}
@@ -855,8 +864,10 @@ rte_mempool_lookup(const char *name)
 	struct rte_tailq_entry *te;
 	struct rte_mempool_list *mempool_list;
 
-	if ((mempool_list =
-	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+	mempool_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
+					rte_mempool_list);
+
+	if (mempool_list == NULL) {
 		rte_errno = E_RTE_NO_TAILQ;
 		return NULL;
 	}
@@ -885,8 +896,9 @@ void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
 	struct rte_tailq_entry *te = NULL;
 	struct rte_mempool_list *mempool_list;
 
-	if ((mempool_list =
-	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL, rte_mempool_list)) == NULL) {
+	mempool_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
+				rte_mempool_list);
+	if (mempool_list == NULL) {
 		rte_errno = E_RTE_NO_TAILQ;
 		return;
 	}
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 3314651..2da5425 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -179,7 +179,9 @@ struct rte_mempool {
 	uintptr_t   elt_va_end;
 	/**< Virtual address of the <size + 1> mempool object. */
 	phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
-	/**< Array of physical pages addresses for the mempool objects buffer. */
+	/**< Array of physical pages addresses for the
+	 * mempool objects buffer.
+	 */
 
 }  __rte_cache_aligned;
 
@@ -200,11 +202,12 @@ struct rte_mempool {
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 #define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
 		unsigned __lcore_id = rte_lcore_id();		\
+								\
 		mp->stats[__lcore_id].name##_objs += n;		\
 		mp->stats[__lcore_id].name##_bulk += 1;		\
-	} while(0)
+	} while (0)
 #else
-#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
 #endif
 
 /**
@@ -216,7 +219,7 @@ struct rte_mempool {
  */
 #define	MEMPOOL_HEADER_SIZE(mp, pgn)	(sizeof(*(mp)) + \
 	RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
-	sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
+	sizeof((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
 
 /**
  * Returns TRUE if whole mempool is allocated in one contiguous block of memory.
@@ -257,6 +260,7 @@ static inline struct rte_mempool **__mempool_from_obj(void *obj)
 static inline const struct rte_mempool *rte_mempool_from_obj(void *obj)
 {
 	struct rte_mempool * const *mpp;
+
 	mpp = __mempool_from_obj(obj);
 	return *mpp;
 }
@@ -272,6 +276,7 @@ static inline uint64_t __mempool_read_header_cookie(const void *obj)
 static inline uint64_t __mempool_read_trailer_cookie(void *obj)
 {
 	struct rte_mempool **mpp = __mempool_from_obj(obj);
+
 	return *(uint64_t *)((char *)obj + (*mpp)->elt_size);
 }
 
@@ -279,6 +284,7 @@ static inline uint64_t __mempool_read_trailer_cookie(void *obj)
 static inline void __mempool_write_header_cookie(void *obj, int free)
 {
 	uint64_t *cookie_p;
+
 	cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t));
 	if (free == 0)
 		*cookie_p = RTE_MEMPOOL_HEADER_COOKIE1;
@@ -292,6 +298,7 @@ static inline void __mempool_write_trailer_cookie(void *obj)
 {
 	uint64_t *cookie_p;
 	struct rte_mempool **mpp = __mempool_from_obj(obj);
+
 	cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size);
 	*cookie_p = RTE_MEMPOOL_TRAILER_COOKIE;
 }
@@ -333,8 +340,8 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 		obj = obj_table[n];
 
 		if (rte_mempool_from_obj(obj) != mp)
-			rte_panic("MEMPOOL: object is owned by another "
-				  "mempool\n");
+			rte_panic(
+			"MEMPOOL: object is owned by another mempool\n");
 
 		cookie = __mempool_read_header_cookie(obj);
 
@@ -342,30 +349,29 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
 				rte_log_set_history(0);
 				RTE_LOG(CRIT, MEMPOOL,
-					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-					obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+				obj, mp, cookie);
 				rte_panic("MEMPOOL: bad header cookie (put)\n");
 			}
 			__mempool_write_header_cookie(obj, 1);
-		}
-		else if (free == 1) {
+		} else if (free == 1) {
 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
 				rte_log_set_history(0);
 				RTE_LOG(CRIT, MEMPOOL,
-					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-					obj, mp, cookie);
+				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+				obj, mp, cookie);
 				rte_panic("MEMPOOL: bad header cookie (get)\n");
 			}
 			__mempool_write_header_cookie(obj, 0);
-		}
-		else if (free == 2) {
+		} else if (free == 2) {
 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
 			    cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
 				rte_log_set_history(0);
 				RTE_LOG(CRIT, MEMPOOL,
-					"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
-					obj, mp, cookie);
-				rte_panic("MEMPOOL: bad header cookie (audit)\n");
+				"obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+				obj, mp, cookie);
+				rte_panic(
+				"MEMPOOL: bad header cookie (audit)\n");
 			}
 		}
 		cookie = __mempool_read_trailer_cookie(obj);
@@ -382,7 +388,7 @@ static inline void __mempool_check_cookies(const struct rte_mempool *mp,
 #pragma GCC diagnostic error "-Wcast-qual"
 #endif
 #else
-#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
+#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while (0)
 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
 
 /**
@@ -807,8 +813,7 @@ ring_enqueue:
 	if (is_mp) {
 		if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
 			rte_panic("cannot put objects in mempool\n");
-	}
-	else {
+	} else {
 		if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
 			rte_panic("cannot put objects in mempool\n");
 	}
@@ -963,8 +968,11 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
 		/* No. Backfill the cache first, and then fill from it */
 		uint32_t req = n + (cache_size - cache->len);
 
-		/* How many do we require i.e. number to fill the cache + the request */
-		ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
+		/* How many do we require i.e. number to fill the
+		 * cache + the request
+		 */
+		ret = rte_ring_mc_dequeue_bulk(mp->ring,
+					&cache->objs[cache->len], req);
 		if (unlikely(ret < 0)) {
 			/*
 			 * In the offchance that we are buffer constrained,
@@ -979,7 +987,8 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
 	}
 
 	/* Now fill in the response ... */
-	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
+	for (index = 0, len = cache->len - 1; index < n;
+		++index, len--, obj_table++)
 		*obj_table = cache_objs[len];
 
 	cache->len -= n;
@@ -1027,6 +1036,7 @@ static inline int __attribute__((always_inline))
 rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
 	int ret;
+
 	ret = __mempool_get_bulk(mp, obj_table, n, 1);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
@@ -1056,6 +1066,7 @@ static inline int __attribute__((always_inline))
 rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
 	int ret;
+
 	ret = __mempool_get_bulk(mp, obj_table, n, 0);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
@@ -1088,6 +1099,7 @@ static inline int __attribute__((always_inline))
 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
 	int ret;
+
 	ret = __mempool_get_bulk(mp, obj_table, n,
 				 !(mp->flags & MEMPOOL_F_SC_GET));
 	if (ret == 0)
-- 
1.9.1
^ permalink raw reply	[flat|nested] 5+ messages in thread