From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
To: santosh.shukla@caviumnetworks.com, olivier.matz@6wind.com
Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Subject: [dpdk-dev] [PATCH 2/2] mempool/octeontx: clean up memory area registration
Date: Fri, 15 Dec 2017 21:30:31 +0530
Message-ID: <20171215160031.11354-2-pbhagavatula@caviumnetworks.com>
In-Reply-To: <20171215160031.11354-1-pbhagavatula@caviumnetworks.com>

Clean up the dependency between alloc and memory area registration;
this removes the need for the SLIST data structure and the
octeontx_pool_list.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 drivers/mempool/octeontx/octeontx_fpavf.c       | 23 ++------
 drivers/mempool/octeontx/octeontx_fpavf.h       |  6 ++-
 drivers/mempool/octeontx/rte_mempool_octeontx.c | 72 ++-----------------------
 3 files changed, 12 insertions(+), 89 deletions(-)
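
Note for reviewers (below the fold, not part of the commit message):
this cleanup relies on 1/2 ("mempool: notify mempool area after
mempool alloc") guaranteeing that the memory area notification runs
after the pool alloc, so mp->pool_id already holds a valid FPA handle
when octeontx_fpavf_register_memory_area() is called. Everything the
callback needs can then be decoded from that 64-bit handle. Below is a
minimal standalone sketch of the decode, assuming the handle layout
from octeontx_fpavf.h (low bits carry the gpool index, the remainder
is the per-VF BAR address); the mask value and the handle used here
are illustrative only:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Illustrative stand-in for FPA_GPOOL_MASK from octeontx_fpavf.h;
     * the real mask width is defined by the driver.
     */
    #define FPA_GPOOL_MASK 0x1fULL

    int main(void)
    {
            /* Hypothetical handle: VF BAR0 base with gpool index 3. */
            uint64_t pool_id = 0x840000000000ULL | 0x3;

            /* pool/aura index lives in the low handle bits */
            uint8_t gpool = (uint8_t)(pool_id & FPA_GPOOL_MASK);
            /* masking the index off recovers the VF BAR address */
            uint64_t pool_bar = pool_id & ~(uint64_t)FPA_GPOOL_MASK;

            printf("gpool=%u pool_bar=0x%" PRIx64 "\n",
                   (unsigned int)gpool, pool_bar);
            return 0;
    }

With that, the callback can pass pool_bar, len, vaddr and gpool
straight to octeontx_fpavf_pool_set_range(), which is what makes the
per-pool SLIST bookkeeping removable.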

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 3bc50f3..28f431e 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -386,8 +386,8 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
 	return ret;
 }
 
-static int
-octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
 			  void *memva, uint16_t gpool)
 {
 	uint64_t va_end;
@@ -509,12 +509,9 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
 
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-				unsigned int buf_offset, char **va_start,
-				int node_id)
+				unsigned int buf_offset, int node_id)
 {
 	unsigned int gpool;
-	void *memva;
-	unsigned long memsz;
 	uintptr_t gpool_handle;
 	uintptr_t pool_bar;
 	int res;
@@ -522,9 +519,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 	RTE_SET_USED(node_id);
 	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
 
-	if (unlikely(*va_start == NULL))
-		goto error_end;
-
 	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
 	if (object_size > FPA_MAX_OBJ_SIZE) {
 		errno = EINVAL;
@@ -567,15 +561,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 		goto error_pool_destroy;
 	}
 
-	/* vf pool setup */
-	memsz = object_size * object_count;
-	memva = *va_start;
-	res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
-	if (res < 0) {
-		errno = res;
-		goto error_gaura_detach;
-	}
-
 	/* Release lock */
 	rte_spinlock_unlock(&fpadev.lock);
 
@@ -591,8 +576,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 
 	return gpool_handle;
 
-error_gaura_detach:
-	(void) octeontx_fpapf_aura_detach(gpool);
 error_pool_destroy:
 	octeontx_fpavf_free(gpool);
 	octeontx_fpapf_pool_destroy(gpool);
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 1d09f00..bc5dc3b 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -114,8 +114,10 @@ do {							\
 
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-				unsigned int buf_offset, char **va_start,
-				int node);
+				unsigned int buf_offset, int node);
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+			  void *memva, uint16_t gpool);
 int
 octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
 int
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index e89355c..fc0cab9 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -36,55 +36,18 @@
 
 #include "octeontx_fpavf.h"
 
-/*
- * Per-pool descriptor.
- * Links mempool with the corresponding memzone,
- * that provides memory under the pool's elements.
- */
-struct octeontx_pool_info {
-	const struct rte_mempool *mp;
-	uintptr_t mz_addr;
-
-	SLIST_ENTRY(octeontx_pool_info) link;
-};
-
-SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
-
-/* List of the allocated pools */
-static struct octeontx_pool_list octeontx_pool_head =
-				SLIST_HEAD_INITIALIZER(octeontx_pool_head);
-/* Spinlock to protect pool list */
-static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
-
 static int
 octeontx_fpavf_alloc(struct rte_mempool *mp)
 {
 	uintptr_t pool;
-	struct octeontx_pool_info *pool_info;
 	uint32_t memseg_count = mp->size;
 	uint32_t object_size;
-	uintptr_t va_start;
 	int rc = 0;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		return -ENXIO;
-	}
-
-	/* virtual hugepage mapped addr */
-	va_start = pool_info->mz_addr;
-	rte_spinlock_unlock(&pool_list_lock);
-
 	object_size = mp->elt_size + mp->header_size + mp->trailer_size;
 
 	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
 						OCTEONTX_FPAVF_BUF_OFFSET,
-						(char **)&va_start,
 						mp->socket_id);
 	rc = octeontx_fpa_bufpool_block_size(pool);
 	if (rc < 0)
@@ -109,27 +72,9 @@ octeontx_fpavf_alloc(struct rte_mempool *mp)
 static void
 octeontx_fpavf_free(struct rte_mempool *mp)
 {
-	struct octeontx_pool_info *pool_info;
 	uintptr_t pool;
-
 	pool = (uintptr_t)mp->pool_id;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		rte_panic("%s: trying to free pool with no valid metadata",
-		    __func__);
-	}
-
-	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-
-	rte_free(pool_info);
 	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
 }
 
@@ -222,21 +167,14 @@ static int
 octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
 				    char *vaddr, rte_iova_t paddr, size_t len)
 {
-	struct octeontx_pool_info *pool_info;
-
 	RTE_SET_USED(paddr);
-	RTE_SET_USED(len);
+	uint8_t gpool;
+	uintptr_t pool_bar;
 
-	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
-	if (pool_info == NULL)
-		return -ENOMEM;
+	gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
+	pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
 
-	pool_info->mp = mp;
-	pool_info->mz_addr = (uintptr_t)vaddr;
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-	return 0;
+	return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
 }
 
 static struct rte_mempool_ops octeontx_fpavf_ops = {
-- 
2.7.4
