DPDK patches and discussions
From: Ashwin Sekhar T K <asekhar@marvell.com>
To: <dev@dpdk.org>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>,
	Ashwin Sekhar T K <asekhar@marvell.com>,
	"Pavan Nikhilesh" <pbhagavatula@marvell.com>
Cc: <jerinj@marvell.com>, <psatheesh@marvell.com>,
	<anoobj@marvell.com>, <gakhil@marvell.com>, <hkalra@marvell.com>
Subject: [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs
Date: Tue, 23 May 2023 14:57:39 +0530
Message-ID: <20230523092739.718214-1-asekhar@marvell.com>
In-Reply-To: <20230523091400.717834-1-asekhar@marvell.com>

The current API to set the range on an aura modifies both
the aura range limits in software and the pool range limits
in NPA hardware.

The newly added ROC APIs allow setting/getting the aura range
limits in software alone, without modifying the hardware.

The existing aura range set functionality has been moved to a
new pool range set API.

Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
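A minimal usage sketch of the split APIs declared in roc_npa.h
(aura_handle, buf_iova, nb_bufs and buf_sz below are placeholder
values, not identifiers from this series):

	uint64_t start, end;

	/* Track the buffer range in software bookkeeping only;
	 * the NPA hardware pool limits are left untouched.
	 */
	roc_npa_aura_op_range_set(aura_handle, buf_iova,
				  buf_iova + nb_bufs * buf_sz);

	/* Read back the range currently tracked in software. */
	roc_npa_aura_op_range_get(aura_handle, &start, &end);

	/* Program the pool range limits in NPA hardware (the
	 * behaviour previously provided by roc_npa_aura_op_range_set()).
	 */
	roc_npa_pool_op_range_set(aura_handle, buf_iova,
				  buf_iova + nb_bufs * buf_sz);
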
 drivers/common/cnxk/roc_nix_queue.c     |  2 +-
 drivers/common/cnxk/roc_npa.c           | 35 ++++++++++++++++++++++++-
 drivers/common/cnxk/roc_npa.h           |  6 +++++
 drivers/common/cnxk/roc_sso.c           |  2 +-
 drivers/common/cnxk/version.map         |  2 ++
 drivers/mempool/cnxk/cnxk_mempool_ops.c |  2 +-
 6 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 21bfe7d498..ac4d9856c1 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1050,7 +1050,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 		goto npa_fail;
 	}
 
-	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
+	roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
 	roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
 	sq->aura_sqb_bufs = nb_sqb_bufs;
 
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index e3c925ddd1..3b0f95a304 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -18,7 +18,7 @@ roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
 }
 
 void
-roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova,
 			  uint64_t end_iova)
 {
 	const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
@@ -32,6 +32,7 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
 	PLT_ASSERT(lf);
 	lim = lf->aura_lim;
 
+	/* Change the range bookkeeping in software as well as in hardware */
 	lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
 	lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
 
@@ -39,6 +40,38 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
 	roc_store_pair(lim[reg].ptr_end, reg, end);
 }
 
+void
+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+			  uint64_t end_iova)
+{
+	uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf = idev_npa_obj_get();
+	struct npa_aura_lim *lim;
+
+	PLT_ASSERT(lf);
+	lim = lf->aura_lim;
+
+	/* Change only the bookkeeping in software */
+	lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
+	lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
+}
+
+void
+roc_npa_aura_op_range_get(uint64_t aura_handle, uint64_t *start_iova,
+			  uint64_t *end_iova)
+{
+	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_aura_lim *lim;
+	struct npa_lf *lf;
+
+	lf = idev_npa_obj_get();
+	PLT_ASSERT(lf);
+
+	lim = lf->aura_lim;
+	*start_iova = lim[aura_id].ptr_start;
+	*end_iova = lim[aura_id].ptr_end;
+}
+
 static int
 npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura,
 		   struct npa_pool_s *pool)
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index df15dabe92..21608a40d9 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,12 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
 void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
 					 uint64_t start_iova,
 					 uint64_t end_iova);
+void __roc_api roc_npa_aura_op_range_get(uint64_t aura_handle,
+					 uint64_t *start_iova,
+					 uint64_t *end_iova);
+void __roc_api roc_npa_pool_op_range_set(uint64_t aura_handle,
+					 uint64_t start_iova,
+					 uint64_t end_iova);
 int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
 				  struct npa_aura_s *aura, int pool_id,
 				  uint32_t flags);
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 4a6a5080f7..c376bd837f 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -523,7 +523,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
 		roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
 		iova += xaq_buf_size;
 	}
-	roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+	roc_npa_pool_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
 
 	if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
 	    xaq->nb_xaq) {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 9414b55e9c..5281c71550 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -354,6 +354,7 @@ INTERNAL {
 	roc_npa_buf_type_update;
 	roc_npa_aura_drop_set;
 	roc_npa_aura_limit_modify;
+	roc_npa_aura_op_range_get;
 	roc_npa_aura_op_range_set;
 	roc_npa_ctx_dump;
 	roc_npa_dev_fini;
@@ -365,6 +366,7 @@ INTERNAL {
 	roc_npa_pool_create;
 	roc_npa_pool_destroy;
 	roc_npa_pool_op_pc_reset;
+	roc_npa_pool_op_range_set;
 	roc_npa_pool_range_update_check;
 	roc_npa_zero_aura_handle;
 	roc_npc_fini;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 1b6c4591bb..a1aeaee746 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -174,7 +174,7 @@ cnxk_mempool_populate(struct rte_mempool *mp, unsigned int max_objs,
 	plt_npa_dbg("requested objects %" PRIu64 ", possible objects %" PRIu64
 		    "", (uint64_t)max_objs, (uint64_t)num_elts);
 
-	roc_npa_aura_op_range_set(mp->pool_id, iova,
+	roc_npa_pool_op_range_set(mp->pool_id, iova,
 				  iova + num_elts * total_elt_sz);
 
 	if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
-- 
2.25.1


