DPDK patches and discussions
 help / color / mirror / Atom feed
From: Ashwin Sekhar T K <asekhar@marvell.com>
To: <dev@dpdk.org>, Ashwin Sekhar T K <asekhar@marvell.com>,
	Pavan Nikhilesh <pbhagavatula@marvell.com>
Cc: <jerinj@marvell.com>, <skori@marvell.com>,
	<skoteshwar@marvell.com>, <kirankumark@marvell.com>,
	<psatheesh@marvell.com>, <anoobj@marvell.com>,
	<gakhil@marvell.com>, <hkalra@marvell.com>,
	<ndabilpuram@marvell.com>
Subject: [PATCH v2 4/5] mempool/cnxk: add hwpool ops
Date: Tue, 23 May 2023 16:24:32 +0530	[thread overview]
Message-ID: <20230523105433.719998-4-asekhar@marvell.com> (raw)
In-Reply-To: <20230523105433.719998-1-asekhar@marvell.com>

Add hwpool ops which can be used to create a rte_mempool that attaches
to another rte_mempool. The hwpool will not have its own buffers and
will have a dummy populate callback. Only an NPA aura will be allocated
for this rte_mempool. The buffers will be allocated from the NPA pool
of the attached rte_mempool.

Only mbuf objects are supported in hwpool. Generic objects are not
supported. Note that this pool will not have any range check enabled.
So user will be able to free any pointer into this pool. HW will not
throw error interrupts if invalid buffers are passed. So user must be
careful when using this pool.

Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
 drivers/mempool/cnxk/cn10k_hwpool_ops.c | 211 ++++++++++++++++++++++++
 drivers/mempool/cnxk/cnxk_mempool.h     |   4 +
 drivers/mempool/cnxk/meson.build        |   1 +
 3 files changed, 216 insertions(+)
 create mode 100644 drivers/mempool/cnxk/cn10k_hwpool_ops.c

diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
new file mode 100644
index 0000000000..9238765155
--- /dev/null
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include <rte_mempool.h>
+
+#include "roc_api.h"
+#include "cnxk_mempool.h"
+
+#define CN10K_HWPOOL_MEM_SIZE 128
+
+/* Enqueue (free) 'n' object pointers into the hwpool's NPA aura
+ * (mempool ops 'enqueue' callback).
+ *
+ * Before each pointer is released, ownership is handed back to the
+ * attached master mempool: both the mbuf's pool field and the
+ * rte_mempool object header are rewritten to point at the master pool
+ * (retrieved from the hwpool's pool config).
+ *
+ * Always returns 0; the NPA aura free operation has no failure path
+ * here.
+ */
+static int __rte_hot
+cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int n)
+{
+	struct rte_mempool *mp;
+	unsigned int index;
+
+	mp = CNXK_MEMPOOL_CONFIG(hp);
+	/* Ensure mbuf init changes are written before the free pointers
+	 * are enqueued to the stack.
+	 */
+	rte_io_wmb();
+	for (index = 0; index < n; index++) {
+		struct rte_mempool_objhdr *hdr;
+		struct rte_mbuf *m;
+
+		m = PLT_PTR_CAST(obj_table[index]);
+		/* Update mempool information in the mbuf */
+		hdr = rte_mempool_get_header(obj_table[index]);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+		/* Debug-only sanity check: the object being freed must still
+		 * be tagged as belonging to this hwpool before re-tagging.
+		 */
+		if (hdr->mp != m->pool || hdr->mp != hp)
+			plt_err("Pool Header Mismatch");
+#endif
+		m->pool = mp;
+		hdr->mp = mp;
+		roc_npa_aura_op_free(hp->pool_id, 0,
+				     (uint64_t)obj_table[index]);
+	}
+
+	return 0;
+}
+
+/* Dequeue (allocate) 'n' objects from the hwpool's NPA aura
+ * (mempool ops 'dequeue' callback).
+ *
+ * Each allocated object is re-tagged so that its mbuf pool field and
+ * rte_mempool object header point at this hwpool. If the aura runs dry
+ * (the NPA alloc keeps returning 0 even after a few retries), every
+ * object obtained so far is rolled back via cn10k_hwpool_enq() and
+ * -ENOENT is returned, making the operation all-or-nothing.
+ */
+static int __rte_hot
+cn10k_hwpool_deq(struct rte_mempool *hp, void **obj_table, unsigned int n)
+{
+	unsigned int index;
+	uint64_t obj;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+	struct rte_mempool *mp;
+
+	mp = CNXK_MEMPOOL_CONFIG(hp);
+#endif
+
+	for (index = 0; index < n; index++, obj_table++) {
+		struct rte_mempool_objhdr *hdr;
+		struct rte_mbuf *m;
+		int retry = 4;
+
+		/* Retry few times before failing */
+		do {
+			obj = roc_npa_aura_op_alloc(hp->pool_id, 0);
+		} while (retry-- && (obj == 0));
+
+		if (obj == 0) {
+			/* Roll back: obj_table was advanced each iteration,
+			 * so obj_table - index is the start of the batch.
+			 */
+			cn10k_hwpool_enq(hp, obj_table - index, index);
+			return -ENOENT;
+		}
+		/* Update mempool information in the mbuf */
+		hdr = rte_mempool_get_header(PLT_PTR_CAST(obj));
+		m = PLT_PTR_CAST(obj);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+		/* Debug-only sanity check: a freshly allocated object should
+		 * still be tagged with the master pool before re-tagging.
+		 */
+		if (hdr->mp != m->pool || hdr->mp != mp)
+			plt_err("Pool Header Mismatch");
+#endif
+		m->pool = hp;
+		hdr->mp = hp;
+		*obj_table = (void *)obj;
+	}
+
+	return 0;
+}
+
+/* Return the number of objects currently available in the hwpool's
+ * NPA aura (mempool ops 'get_count' callback).
+ */
+static unsigned int
+cn10k_hwpool_get_count(const struct rte_mempool *hp)
+{
+	return (unsigned int)roc_npa_aura_op_available(hp->pool_id);
+}
+
+/* Allocate the hwpool backing resources (mempool ops 'alloc' callback).
+ *
+ * A hwpool owns only an NPA aura; the NPA pool (and hence the buffer
+ * memory) belongs to the master mempool passed in via pool config.
+ * Validates that:
+ *  - neither the hwpool nor the master pool uses a mempool cache,
+ *  - no cnxk-private flags were passed in through pool config,
+ *  - element/header/trailer sizes and pool size match the master pool.
+ * On success the new aura handle is stored in hp->pool_id and the
+ * CNXK_MEMPOOL_F_IS_HWPOOL flag is set.
+ *
+ * Returns 0 on success, -EINVAL on bad parameters, or the negative
+ * error code from aura creation.
+ */
+static int
+cn10k_hwpool_alloc(struct rte_mempool *hp)
+{
+	uint64_t aura_handle = 0;
+	struct rte_mempool *mp;
+	uint32_t pool_id;
+	int rc;
+
+	if (hp->cache_size) {
+		plt_err("Hwpool does not support cache");
+		return -EINVAL;
+	}
+
+	if (CNXK_MEMPOOL_FLAGS(hp)) {
+		plt_err("Flags must not be passed to hwpool ops");
+		return -EINVAL;
+	}
+
+	mp = CNXK_MEMPOOL_CONFIG(hp);
+	if (!mp) {
+		plt_err("Invalid rte_mempool passed as pool_config");
+		return -EINVAL;
+	}
+	if (mp->cache_size) {
+		plt_err("Hwpool does not support attaching to pool with cache");
+		return -EINVAL;
+	}
+
+	if (hp->elt_size != mp->elt_size ||
+	    hp->header_size != mp->header_size ||
+	    hp->trailer_size != mp->trailer_size || hp->size != mp->size) {
+		/* This branch fires when the parameters do NOT match the
+		 * master pool, so say so (original message said "matching").
+		 */
+		plt_err("Hwpool parameters not matching with master pool");
+		return -EINVAL;
+	}
+
+	/* Create the NPA aura on top of the master pool's NPA pool */
+	pool_id = roc_npa_aura_handle_to_aura(mp->pool_id);
+	rc = roc_npa_aura_create(&aura_handle, hp->size, NULL, (int)pool_id, 0);
+	if (rc) {
+		plt_err("Failed to create aura rc=%d", rc);
+		return rc;
+	}
+
+	/* Set the flags for the hardware pool */
+	CNXK_MEMPOOL_SET_FLAGS(hp, CNXK_MEMPOOL_F_IS_HWPOOL);
+	hp->pool_id = aura_handle;
+	plt_npa_dbg("aura_handle=0x%" PRIx64, aura_handle);
+
+	return 0;
+}
+
+/* Release the hwpool's NPA aura (mempool ops 'free' callback).
+ *
+ * Only the aura is destroyed; the buffers and the NPA pool belong to
+ * the attached master mempool and are left untouched.
+ */
+static void
+cn10k_hwpool_free(struct rte_mempool *hp)
+{
+	int rc = 0;
+
+	plt_npa_dbg("aura_handle=0x%" PRIx64, hp->pool_id);
+	/* It can happen that rte_mempool_free() is called immediately after
+	 * rte_mempool_create_empty(). In such cases the NPA pool will not be
+	 * allocated.
+	 */
+	if (roc_npa_aura_handle_to_base(hp->pool_id) == 0)
+		return;
+
+	rc = roc_npa_aura_destroy(hp->pool_id);
+	if (rc)
+		plt_err("Failed to destroy aura rc=%d", rc);
+}
+
+/* Report a fixed, token memory requirement (mempool ops
+ * 'calc_mem_size' callback). The hwpool has no buffers of its own, so
+ * object count and page shift are irrelevant; a constant
+ * CN10K_HWPOOL_MEM_SIZE chunk is requested only to let the generic
+ * mempool initialization path proceed.
+ */
+static ssize_t
+cn10k_hwpool_calc_mem_size(const struct rte_mempool *hp, uint32_t obj_num,
+			   uint32_t pg_shift, size_t *min_chunk_size,
+			   size_t *align)
+{
+	RTE_SET_USED(hp);
+	RTE_SET_USED(obj_num);
+	RTE_SET_USED(pg_shift);
+	*min_chunk_size = CN10K_HWPOOL_MEM_SIZE;
+	*align = CN10K_HWPOOL_MEM_SIZE;
+	/* Return a minimum mem size so that hwpool can also be initialized just
+	 * like a regular pool. This memzone will not be used anywhere.
+	 */
+	return CN10K_HWPOOL_MEM_SIZE;
+}
+
+/* Dummy 'populate' callback: there are no objects to place, since the
+ * buffers live in the master pool. Only the master aura's iova range
+ * is copied to this hwpool's aura. Returns hp->size so the mempool
+ * library considers the pool fully populated.
+ */
+static int
+cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int max_objs,
+		      void *vaddr, rte_iova_t iova, size_t len,
+		      rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+	uint64_t start_iova, end_iova;
+	struct rte_mempool *mp;
+
+	RTE_SET_USED(max_objs);
+	RTE_SET_USED(vaddr);
+	RTE_SET_USED(iova);
+	RTE_SET_USED(len);
+	RTE_SET_USED(obj_cb);
+	RTE_SET_USED(obj_cb_arg);
+	/* HW pools does not require populating anything as these pools are
+	 * only associated with NPA aura. The NPA pool being used is that of
+	 * another rte_mempool. Only copy the iova range from the aura of
+	 * the other rte_mempool to this pool's aura.
+	 */
+	mp = CNXK_MEMPOOL_CONFIG(hp);
+	roc_npa_aura_op_range_get(mp->pool_id, &start_iova, &end_iova);
+	roc_npa_aura_op_range_set(hp->pool_id, start_iova, end_iova);
+
+	return hp->size;
+}
+
+/* Mempool ops table for the cn10k hwpool, registered with the mempool
+ * library under the name "cn10k_hwpool_ops".
+ */
+static struct rte_mempool_ops cn10k_hwpool_ops = {
+	.name = "cn10k_hwpool_ops",
+	.alloc = cn10k_hwpool_alloc,
+	.free = cn10k_hwpool_free,
+	.enqueue = cn10k_hwpool_enq,
+	.dequeue = cn10k_hwpool_deq,
+	.get_count = cn10k_hwpool_get_count,
+	.calc_mem_size = cn10k_hwpool_calc_mem_size,
+	.populate = cn10k_hwpool_populate,
+};
+
+RTE_MEMPOOL_REGISTER_OPS(cn10k_hwpool_ops);
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index fc2e4b5b70..4ca05d53e1 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -16,6 +16,10 @@ enum cnxk_mempool_flags {
 	 * as pool config to create the pool.
 	 */
 	CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+	/* This flag indicates whether the pool is a hardware pool or not.
+	 * This flag is set by the driver.
+	 */
+	CNXK_MEMPOOL_F_IS_HWPOOL = RTE_BIT64(2),
 };
 
 #define CNXK_MEMPOOL_F_MASK 0xFUL
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
index 50856ecde8..ce152bedd2 100644
--- a/drivers/mempool/cnxk/meson.build
+++ b/drivers/mempool/cnxk/meson.build
@@ -14,6 +14,7 @@ sources = files(
         'cnxk_mempool_telemetry.c',
         'cn9k_mempool_ops.c',
         'cn10k_mempool_ops.c',
+        'cn10k_hwpool_ops.c',
 )
 
 deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
-- 
2.25.1


  parent reply	other threads:[~2023-05-23 10:55 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-04-11  7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
2023-04-11  7:55 ` [PATCH 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-04-11  7:55 ` [PATCH 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
2023-04-11  7:55 ` [PATCH 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
2023-04-11  7:55 ` [PATCH 4/5] mempool/cnxk: add hwpool ops Ashwin Sekhar T K
2023-04-11  7:55 ` [PATCH 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
2023-05-17 18:46   ` Jerin Jacob
2023-05-23  9:04 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23  9:04   ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
2023-05-23  9:04   ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
2023-05-23  9:04   ` [PATCH v2 4/5] mempool/cnxk: add hwpool ops Ashwin Sekhar T K
2023-05-23  9:04   ` [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
2023-05-24  9:33     ` Jerin Jacob
2023-05-23  9:13 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23  9:13   ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
2023-05-23  9:13   ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
2023-05-23  9:27   ` Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23 10:54   ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
2023-05-23 10:54   ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
2023-05-23 10:54   ` Ashwin Sekhar T K [this message]
2023-05-23 10:54   ` [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230523105433.719998-4-asekhar@marvell.com \
    --to=asekhar@marvell.com \
    --cc=anoobj@marvell.com \
    --cc=dev@dpdk.org \
    --cc=gakhil@marvell.com \
    --cc=hkalra@marvell.com \
    --cc=jerinj@marvell.com \
    --cc=kirankumark@marvell.com \
    --cc=ndabilpuram@marvell.com \
    --cc=pbhagavatula@marvell.com \
    --cc=psatheesh@marvell.com \
    --cc=skori@marvell.com \
    --cc=skoteshwar@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).