DPDK patches and discussions
From: David Hunt <david.hunt@intel.com>
To: dev@dpdk.org
Cc: olivier.matz@6wind.com, viktorin@rehivetech.com,
	jerin.jacob@caviumnetworks.com, shreyansh.jain@nxp.com,
	David Hunt <david.hunt@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
Date: Mon, 20 Jun 2016 14:08:10 +0100	[thread overview]
Message-ID: <1466428091-115821-2-git-send-email-david.hunt@intel.com> (raw)
In-Reply-To: <1466428091-115821-1-git-send-email-david.hunt@intel.com>

This is a mempool handler that is useful for pipelined applications,
where the per-lcore mempool cache is of little benefit because objects
are allocated and freed on different cores: for example, one core doing
Rx (and alloc) and another core doing Tx (and free). In such a case,
the mempool ring simply cycles through all the mbufs, resulting in an
LLC miss on every mbuf allocated when the number of mbufs is large.
A stack (LIFO) recycles buffers more effectively in this case.
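
As an illustration of how an application would select this handler, here
is a minimal, hypothetical sketch using the rte_mempool_create_empty(),
rte_mempool_set_ops_byname() and rte_mempool_populate_default() API from
the external mempool manager series; the pool name and sizing constants
are placeholders and are not part of this patch:

    #include <rte_mempool.h>
    #include <rte_lcore.h>

    #define EXAMPLE_NB_OBJS  8192   /* placeholder pool size */
    #define EXAMPLE_OBJ_SIZE 2048   /* placeholder object size */

    static struct rte_mempool *
    example_create_stack_pool(void)
    {
            struct rte_mempool *mp;

            /* Create an empty pool first... */
            mp = rte_mempool_create_empty("example_stack_pool",
                            EXAMPLE_NB_OBJS, EXAMPLE_OBJ_SIZE,
                            0, 0, rte_socket_id(), 0);
            if (mp == NULL)
                    return NULL;

            /* ...switch it to the "stack" ops before populating it... */
            if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0) {
                    rte_mempool_free(mp);
                    return NULL;
            }

            /* ...then add the objects, which the LIFO handler manages. */
            if (rte_mempool_populate_default(mp) < 0) {
                    rte_mempool_free(mp);
                    return NULL;
            }

            return mp;
    }

A cache_size of 0 is used above to match the pipelined use case
described earlier, where a per-lcore cache would not be reused anyway.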

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
 2 files changed, 146 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index a4c089e..057a6ab 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -44,6 +44,7 @@ LIBABIVER := 2
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ops.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..a92da69
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,145 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+struct rte_mempool_stack {
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+};
+
+static int
+stack_alloc(struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s;
+	unsigned n = mp->size;
+	int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+	/* Allocate our local memory structure */
+	s = rte_zmalloc_socket("mempool-stack",
+			size,
+			RTE_CACHE_LINE_SIZE,
+			mp->socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return -ENOMEM;
+	}
+
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->pool_data = s;
+
+	return 0;
+}
+
+static int stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index;
+
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if ((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOBUFS;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int stack_dequeue(struct rte_mempool *mp, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index, len;
+
+	rte_spinlock_lock(&s->sl);
+
+	if (unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n;
+			++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned
+stack_get_count(const struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+
+	return s->len;
+}
+
+static void
+stack_free(struct rte_mempool *mp)
+{
+	rte_free((void *)(mp->pool_data));
+}
+
+static struct rte_mempool_ops ops_stack = {
+	.name = "stack",
+	.alloc = stack_alloc,
+	.free = stack_free,
+	.enqueue = stack_enqueue,
+	.dequeue = stack_dequeue,
+	.get_count = stack_get_count
+};
+
+MEMPOOL_REGISTER_OPS(ops_stack);
-- 
2.5.5


Thread overview: 48+ messages
2016-05-05 18:29 [dpdk-dev] [PATCH 0/2] mempool: add stack (fifo) " David Hunt
2016-05-05 18:29 ` [dpdk-dev] [PATCH 1/2] " David Hunt
2016-05-05 21:28   ` Stephen Hemminger
2016-05-19 15:21     ` Hunt, David
2016-05-05 18:29 ` [dpdk-dev] [PATCH 2/2] test: add autotest for external mempool stack handler David Hunt
2016-05-06  8:34 ` [dpdk-dev] [PATCH 0/2] mempool: add stack (fifo) mempool handler Tan, Jianfeng
2016-05-06 23:02   ` Hunt, David
2016-05-19 14:48 ` [dpdk-dev] v2 mempool: add stack (lifo) " David Hunt
2016-05-19 14:48   ` [dpdk-dev] [PATCH v2 1/3] " David Hunt
2016-05-23 12:55     ` Olivier Matz
2016-06-15 10:10       ` Hunt, David
2016-06-17 14:18       ` Hunt, David
2016-06-20  8:17         ` Olivier Matz
2016-06-20 12:59           ` Hunt, David
2016-06-29 14:31             ` Olivier MATZ
2016-05-19 14:48   ` [dpdk-dev] [PATCH v2 2/3] mempool: make declaration of handler structs const David Hunt
2016-05-23 12:55     ` Olivier Matz
2016-05-24 14:01       ` Hunt, David
2016-05-19 14:48   ` [dpdk-dev] [PATCH v2 3/3] test: add autotest for external mempool stack handler David Hunt
2016-05-19 15:16   ` [dpdk-dev] v2 mempool: add stack (lifo) mempool handler Hunt, David
2016-06-20 13:08   ` [dpdk-dev] mempool: add stack " David Hunt
2016-06-20 13:08     ` David Hunt [this message]
2016-06-20 13:25       ` [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) " Jerin Jacob
2016-06-20 13:54         ` Thomas Monjalon
2016-06-20 13:58           ` Ananyev, Konstantin
2016-06-20 14:22             ` Jerin Jacob
2016-06-20 17:56               ` Ananyev, Konstantin
2016-06-21  3:35                 ` Jerin Jacob
2016-06-21  9:28                   ` Ananyev, Konstantin
2016-06-21  9:44                     ` Olivier Matz
2016-06-21  3:42           ` Jerin Jacob
2016-06-20 13:08     ` [dpdk-dev] [PATCH v3 2/2] test: add autotest for external mempool stack handler David Hunt
2016-06-30  7:41     ` [dpdk-dev] [PATCH v4 0/2] mempool: add stack mempool handler David Hunt
2016-06-30  7:41       ` [dpdk-dev] [PATCH v4 1/2] mempool: add stack (lifo) " David Hunt
2016-06-30  7:41       ` [dpdk-dev] [PATCH v4 2/2] test: migrate custom handler test to stack handler David Hunt
2016-06-30  9:45         ` Thomas Monjalon
2016-06-30 17:36           ` Hunt, David
2016-06-30 17:46             ` Thomas Monjalon
2016-06-30 17:49               ` Hunt, David
2016-06-30 18:05       ` [dpdk-dev] [PATCH v5 0/2] mempool: add stack mempool handler David Hunt
2016-06-30 18:05         ` [dpdk-dev] [PATCH v5 1/2] mempool: add stack (lifo) " David Hunt
2016-06-30 18:05         ` [dpdk-dev] [PATCH v5 2/2] test: migrate custom handler test to stack handler David Hunt
2016-07-01  7:32           ` Olivier MATZ
2016-07-01  7:46         ` [dpdk-dev] [PATCH v6 0/2] mempool: add stack mempool handler David Hunt
2016-07-01  7:46           ` [dpdk-dev] [PATCH v6 1/2] mempool: add stack (lifo) " David Hunt
2016-07-01  7:46           ` [dpdk-dev] [PATCH v6 2/2] test: migrate custom handler test to stack handler David Hunt
2016-07-01  8:18           ` [dpdk-dev] [PATCH v6 0/2] mempool: add stack mempool handler Olivier MATZ
2016-07-01 10:41             ` Thomas Monjalon
