* [PATCH 1/5] mempool/cnxk: use pool config to pass flags
2023-04-11 7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
@ 2023-04-11 7:55 ` Ashwin Sekhar T K
2023-04-11 7:55 ` [PATCH 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
` (6 subsequent siblings)
7 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-04-11 7:55 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh, Nithin Dabilpuram,
Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Use lower bits of pool_config to pass flags specific to
cnxk mempool PMD ops.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/mempool/cnxk/cnxk_mempool.h | 24 ++++++++++++++++++++++++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 17 ++++++++++-------
drivers/net/cnxk/cnxk_ethdev_sec.c | 25 ++++++-------------------
3 files changed, 40 insertions(+), 26 deletions(-)
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index 3405aa7663..fc2e4b5b70 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -7,6 +7,30 @@
#include <rte_mempool.h>
+enum cnxk_mempool_flags {
+ /* This flag is used to ensure that only aura zero is allocated.
+ * If aura zero is not available, then mempool creation fails.
+ */
+ CNXK_MEMPOOL_F_ZERO_AURA = RTE_BIT64(0),
+ /* Here the pool create will use the npa_aura_s structure passed
+ * as pool config to create the pool.
+ */
+ CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+};
+
+#define CNXK_MEMPOOL_F_MASK 0xFUL
+
+#define CNXK_MEMPOOL_FLAGS(_m) \
+ (PLT_U64_CAST((_m)->pool_config) & CNXK_MEMPOOL_F_MASK)
+#define CNXK_MEMPOOL_CONFIG(_m) \
+ (PLT_PTR_CAST(PLT_U64_CAST((_m)->pool_config) & ~CNXK_MEMPOOL_F_MASK))
+#define CNXK_MEMPOOL_SET_FLAGS(_m, _f) \
+ do { \
+ void *_c = CNXK_MEMPOOL_CONFIG(_m); \
+ uint64_t _flags = CNXK_MEMPOOL_FLAGS(_m) | (_f); \
+ (_m)->pool_config = PLT_PTR_CAST(PLT_U64_CAST(_c) | _flags); \
+ } while (0)
+
unsigned int cnxk_mempool_get_count(const struct rte_mempool *mp);
ssize_t cnxk_mempool_calc_mem_size(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 3769afd3d1..1b6c4591bb 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,7 +72,7 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
int
cnxk_mempool_alloc(struct rte_mempool *mp)
{
- uint32_t block_count, flags = 0;
+ uint32_t block_count, flags, roc_flags = 0;
uint64_t aura_handle = 0;
struct npa_aura_s aura;
struct npa_pool_s pool;
@@ -96,15 +96,18 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
pool.nat_align = 1;
pool.buf_offset = mp->header_size / ROC_ALIGN;
- /* Use driver specific mp->pool_config to override aura config */
- if (mp->pool_config != NULL)
- memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
+ flags = CNXK_MEMPOOL_FLAGS(mp);
+ if (flags & CNXK_MEMPOOL_F_ZERO_AURA) {
+ roc_flags = ROC_NPA_ZERO_AURA_F;
+ } else if (flags & CNXK_MEMPOOL_F_CUSTOM_AURA) {
+ struct npa_aura_s *paura;
- if (aura.ena && aura.pool_addr == 0)
- flags = ROC_NPA_ZERO_AURA_F;
+ paura = CNXK_MEMPOOL_CONFIG(mp);
+ memcpy(&aura, paura, sizeof(struct npa_aura_s));
+ }
rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
- &pool, flags);
+ &pool, roc_flags);
if (rc) {
plt_err("Failed to alloc pool or aura rc=%d", rc);
goto error;
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index aa8a378a00..cd64daacc0 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -3,6 +3,7 @@
*/
#include <cnxk_ethdev.h>
+#include <cnxk_mempool.h>
#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
@@ -43,7 +44,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
{
const char *mp_name = NULL;
struct rte_pktmbuf_pool_private mbp_priv;
- struct npa_aura_s *aura;
struct rte_mempool *mp;
uint16_t first_skip;
int rc;
@@ -65,7 +65,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
return -EINVAL;
}
- plt_free(mp->pool_config);
rte_mempool_free(mp);
*aura_handle = 0;
@@ -84,22 +83,12 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
return -EIO;
}
- /* Indicate to allocate zero aura */
- aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
- if (!aura) {
- rc = -ENOMEM;
- goto free_mp;
- }
- aura->ena = 1;
- if (!mempool_name)
- aura->pool_addr = 0;
- else
- aura->pool_addr = 1; /* Any non zero value, so that alloc from next free Index */
-
- rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(), aura);
+ rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+ mempool_name ?
+ NULL : PLT_PTR_CAST(CNXK_MEMPOOL_F_ZERO_AURA));
if (rc) {
plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
- goto free_aura;
+ goto free_mp;
}
/* Init mempool private area */
@@ -113,15 +102,13 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
rc = rte_mempool_populate_default(mp);
if (rc < 0) {
plt_err("Failed to create inline meta pool, rc=%d", rc);
- goto free_aura;
+ goto free_mp;
}
rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
*aura_handle = mp->pool_id;
*mpool = (uintptr_t)mp;
return 0;
-free_aura:
- plt_free(aura);
free_mp:
rte_mempool_free(mp);
return rc;
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH 2/5] common/cnxk: add NPA aura create/destroy ROC APIs
2023-04-11 7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
2023-04-11 7:55 ` [PATCH 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
@ 2023-04-11 7:55 ` Ashwin Sekhar T K
2023-04-11 7:55 ` [PATCH 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
` (5 subsequent siblings)
7 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-04-11 7:55 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: jerinj, pbhagavatula, psatheesh, asekhar, anoobj, gakhil, hkalra
Add ROC APIs which allow creating NPA auras independently and
attaching them to an existing NPA pool. Also add an API to destroy
NPA auras independently.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_npa.c | 219 ++++++++++++++++++++++++++++++++
drivers/common/cnxk/roc_npa.h | 4 +
drivers/common/cnxk/version.map | 2 +
3 files changed, 225 insertions(+)
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 69c3d8d250..a07f37d606 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -85,6 +85,36 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura
return rc;
}
+static int
+npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
+{
+ struct npa_aq_enq_req *aura_init_req;
+ struct npa_aq_enq_rsp *aura_init_rsp;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ mbox = mbox_get(m_box);
+ aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (aura_init_req == NULL)
+ goto exit;
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_INIT;
+ mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+ rc = mbox_process_msg(mbox, (void **)&aura_init_rsp);
+ if (rc < 0)
+ goto exit;
+
+ if (aura_init_rsp->hdr.rc == 0)
+ rc = 0;
+ else
+ rc = NPA_ERR_AURA_POOL_INIT;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
static int
npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
{
@@ -156,6 +186,54 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
return rc;
}
+static int
+npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
+{
+ struct npa_aq_enq_req *aura_req;
+ struct npa_aq_enq_rsp *aura_rsp;
+ struct ndc_sync_op *ndc_req;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ /* Procedure for disabling an aura/pool */
+ plt_delay_us(10);
+
+ mbox = mbox_get(m_box);
+ aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (aura_req == NULL)
+ goto exit;
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+ aura_req->aura.ena = 0;
+ aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+ rc = mbox_process_msg(mbox, (void **)&aura_rsp);
+ if (rc < 0)
+ goto exit;
+
+ if (aura_rsp->hdr.rc != 0)
+ return NPA_ERR_AURA_POOL_FINI;
+
+ /* Sync NDC-NPA for LF */
+ ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+ if (ndc_req == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+ ndc_req->npa_lf_sync = 1;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+ rc = NPA_ERR_AURA_POOL_FINI;
+ goto exit;
+ }
+ rc = 0;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
int
roc_npa_pool_op_pc_reset(uint64_t aura_handle)
{
@@ -493,6 +571,108 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
return rc;
}
+static int
+npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id,
+ struct npa_aura_s *aura, uint64_t *aura_handle, uint32_t flags)
+{
+ int rc, aura_id;
+
+ /* Sanity check */
+ if (!lf || !aura || !aura_handle)
+ return NPA_ERR_PARAM;
+
+ roc_npa_dev_lock();
+ /* Get aura_id from resource bitmap */
+ aura_id = find_free_aura(lf, flags);
+ if (aura_id < 0) {
+ roc_npa_dev_unlock();
+ return NPA_ERR_AURA_ID_ALLOC;
+ }
+
+ /* Mark aura as reserved */
+ plt_bitmap_clear(lf->npa_bmp, aura_id);
+
+ roc_npa_dev_unlock();
+ rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
+ aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
+ NPA_ERR_AURA_ID_ALLOC :
+ 0;
+ if (rc)
+ goto exit;
+
+ /* Update aura fields */
+ aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
+ aura->ena = 1;
+ aura->shift = plt_log2_u32(block_count);
+ aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
+ aura->limit = block_count;
+ aura->pool_caching = 1;
+ aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+ aura->avg_con = 0;
+ /* Many to one reduction */
+ aura->err_qint_idx = aura_id % lf->qints;
+
+ /* Issue AURA_INIT and POOL_INIT op */
+ rc = npa_aura_init(lf->mbox, aura_id, aura);
+ if (rc)
+ return rc;
+
+ *aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);
+
+ return 0;
+
+exit:
+ return rc;
+}
+
+int
+roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
+ struct npa_aura_s *aura, int pool_id, uint32_t flags)
+{
+ struct npa_aura_s defaura;
+ struct idev_cfg *idev;
+ struct npa_lf *lf;
+ int rc;
+
+ lf = idev_npa_obj_get();
+ if (lf == NULL) {
+ rc = NPA_ERR_DEVICE_NOT_BOUNDED;
+ goto error;
+ }
+
+ idev = idev_get_cfg();
+ if (idev == NULL) {
+ rc = NPA_ERR_ALLOC;
+ goto error;
+ }
+
+ if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
+ rc = NPA_ERR_ALLOC;
+ goto error;
+ }
+
+ if (aura == NULL) {
+ memset(&defaura, 0, sizeof(struct npa_aura_s));
+ aura = &defaura;
+ }
+
+ rc = npa_aura_alloc(lf, block_count, pool_id, aura, aura_handle, flags);
+ if (rc) {
+ plt_err("Failed to alloc aura rc=%d", rc);
+ goto error;
+ }
+
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, *aura_handle);
+
+ /* Just hold the reference of the object */
+ __atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
+error:
+ return rc;
+}
+
int
roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
{
@@ -561,6 +741,45 @@ roc_npa_pool_destroy(uint64_t aura_handle)
return rc;
}
+static int
+npa_aura_free(struct npa_lf *lf, uint64_t aura_handle)
+{
+ int aura_id, rc;
+
+ if (!lf || !aura_handle)
+ return NPA_ERR_PARAM;
+
+ aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ rc = npa_aura_fini(lf->mbox, aura_id);
+
+ if (rc)
+ return rc;
+
+ memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));
+
+ roc_npa_dev_lock();
+ plt_bitmap_set(lf->npa_bmp, aura_id);
+ roc_npa_dev_unlock();
+
+ return rc;
+}
+
+int
+roc_npa_aura_destroy(uint64_t aura_handle)
+{
+ struct npa_lf *lf = idev_npa_obj_get();
+ int rc = 0;
+
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
+ rc = npa_aura_free(lf, aura_handle);
+ if (rc)
+ plt_err("Failed to destroy aura rc=%d", rc);
+
+ /* Release the reference of npa */
+ rc |= npa_lf_fini();
+ return rc;
+}
+
int
roc_npa_pool_range_update_check(uint64_t aura_handle)
{
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index dd588b0322..df15dabe92 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,10 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
+ struct npa_aura_s *aura, int pool_id,
+ uint32_t flags);
+int __roc_api roc_npa_aura_destroy(uint64_t aura_handle);
uint64_t __roc_api roc_npa_zero_aura_handle(void);
int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index b298a21b84..9414b55e9c 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -347,6 +347,8 @@ INTERNAL {
roc_nix_vlan_mcam_entry_write;
roc_nix_vlan_strip_vtag_ena_dis;
roc_nix_vlan_tpid_set;
+ roc_npa_aura_create;
+ roc_npa_aura_destroy;
roc_npa_buf_type_mask;
roc_npa_buf_type_limit_get;
roc_npa_buf_type_update;
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH 3/5] mempool/cnxk: add NPA aura range get/set APIs
2023-04-11 7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
2023-04-11 7:55 ` [PATCH 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-04-11 7:55 ` [PATCH 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
@ 2023-04-11 7:55 ` Ashwin Sekhar T K
2023-04-11 7:55 ` [PATCH 4/5] mempool/cnxk: add hwpool ops Ashwin Sekhar T K
` (4 subsequent siblings)
7 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-04-11 7:55 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Current APIs to set range on auras modify both the
aura range limits in software and the pool range limits
in NPA hardware.
Newly added ROC APIs allow setting/getting aura range limits
in software alone without modifying hardware.
The existing aura range set functionality has been moved
as a pool range set API.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_nix_queue.c | 2 +-
drivers/common/cnxk/roc_npa.c | 35 ++++++++++++++++++++++++-
drivers/common/cnxk/roc_npa.h | 6 +++++
drivers/common/cnxk/roc_sso.c | 2 +-
drivers/common/cnxk/version.map | 2 ++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 2 +-
6 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 21bfe7d498..ac4d9856c1 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1050,7 +1050,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
goto npa_fail;
}
- roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
+ roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
sq->aura_sqb_bufs = nb_sqb_bufs;
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index a07f37d606..42846ac4ec 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -18,7 +18,7 @@ roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
}
void
-roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova,
uint64_t end_iova)
{
const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
@@ -32,6 +32,7 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
PLT_ASSERT(lf);
lim = lf->aura_lim;
+ /* Change the range bookkeeping in software as well as in hardware */
lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
@@ -39,6 +40,38 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
roc_store_pair(lim[reg].ptr_end, reg, end);
}
+void
+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+ uint64_t end_iova)
+{
+ uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aura_lim *lim;
+
+ PLT_ASSERT(lf);
+ lim = lf->aura_lim;
+
+ /* Change only the bookkeeping in software */
+ lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
+ lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
+}
+
+void
+roc_npa_aura_op_range_get(uint64_t aura_handle, uint64_t *start_iova,
+ uint64_t *end_iova)
+{
+ uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_aura_lim *lim;
+ struct npa_lf *lf;
+
+ lf = idev_npa_obj_get();
+ PLT_ASSERT(lf);
+
+ lim = lf->aura_lim;
+ *start_iova = lim[aura_id].ptr_start;
+ *end_iova = lim[aura_id].ptr_end;
+}
+
static int
npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura,
struct npa_pool_s *pool)
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index df15dabe92..21608a40d9 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,12 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+void __roc_api roc_npa_aura_op_range_get(uint64_t aura_handle,
+ uint64_t *start_iova,
+ uint64_t *end_iova);
+void __roc_api roc_npa_pool_op_range_set(uint64_t aura_handle,
+ uint64_t start_iova,
+ uint64_t end_iova);
int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
struct npa_aura_s *aura, int pool_id,
uint32_t flags);
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 4a6a5080f7..c376bd837f 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -523,7 +523,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
iova += xaq_buf_size;
}
- roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+ roc_npa_pool_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
xaq->nb_xaq) {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 9414b55e9c..5281c71550 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -354,6 +354,7 @@ INTERNAL {
roc_npa_buf_type_update;
roc_npa_aura_drop_set;
roc_npa_aura_limit_modify;
+ roc_npa_aura_op_range_get;
roc_npa_aura_op_range_set;
roc_npa_ctx_dump;
roc_npa_dev_fini;
@@ -365,6 +366,7 @@ INTERNAL {
roc_npa_pool_create;
roc_npa_pool_destroy;
roc_npa_pool_op_pc_reset;
+ roc_npa_pool_op_range_set;
roc_npa_pool_range_update_check;
roc_npa_zero_aura_handle;
roc_npc_fini;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 1b6c4591bb..a1aeaee746 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -174,7 +174,7 @@ cnxk_mempool_populate(struct rte_mempool *mp, unsigned int max_objs,
plt_npa_dbg("requested objects %" PRIu64 ", possible objects %" PRIu64
"", (uint64_t)max_objs, (uint64_t)num_elts);
- roc_npa_aura_op_range_set(mp->pool_id, iova,
+ roc_npa_pool_op_range_set(mp->pool_id, iova,
iova + num_elts * total_elt_sz);
if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH 4/5] mempool/cnxk: add hwpool ops
2023-04-11 7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
` (2 preceding siblings ...)
2023-04-11 7:55 ` [PATCH 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
@ 2023-04-11 7:55 ` Ashwin Sekhar T K
2023-04-11 7:55 ` [PATCH 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
` (3 subsequent siblings)
7 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-04-11 7:55 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, skori, skoteshwar, kirankumark, psatheesh, anoobj,
gakhil, hkalra, ndabilpuram
Add hwpool ops which can be used to create a rte_mempool that attaches
to another rte_mempool. The hwpool will not have its own buffers and
will have a dummy populate callback. Only an NPA aura will be allocated
for this rte_mempool. The buffers will be allocated from the NPA pool
of the attached rte_mempool.
Only mbuf objects are supported in hwpool. Generic objects are not
supported. Note that this pool will not have any range check enabled.
So the user will be able to free any pointer into this pool. HW will not
throw error interrupts if invalid buffers are passed. So the user must
be careful when using this pool.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/mempool/cnxk/cn10k_hwpool_ops.c | 211 ++++++++++++++++++++++++
drivers/mempool/cnxk/cnxk_mempool.h | 4 +
drivers/mempool/cnxk/meson.build | 1 +
3 files changed, 216 insertions(+)
create mode 100644 drivers/mempool/cnxk/cn10k_hwpool_ops.c
diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
new file mode 100644
index 0000000000..9238765155
--- /dev/null
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include <rte_mempool.h>
+
+#include "roc_api.h"
+#include "cnxk_mempool.h"
+
+#define CN10K_HWPOOL_MEM_SIZE 128
+
+static int __rte_hot
+cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int n)
+{
+ struct rte_mempool *mp;
+ unsigned int index;
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ /* Ensure mbuf init changes are written before the free pointers
+ * are enqueued to the stack.
+ */
+ rte_io_wmb();
+ for (index = 0; index < n; index++) {
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mbuf *m;
+
+ m = PLT_PTR_CAST(obj_table[index]);
+ /* Update mempool information in the mbuf */
+ hdr = rte_mempool_get_header(obj_table[index]);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (hdr->mp != m->pool || hdr->mp != hp)
+ plt_err("Pool Header Mismatch");
+#endif
+ m->pool = mp;
+ hdr->mp = mp;
+ roc_npa_aura_op_free(hp->pool_id, 0,
+ (uint64_t)obj_table[index]);
+ }
+
+ return 0;
+}
+
+static int __rte_hot
+cn10k_hwpool_deq(struct rte_mempool *hp, void **obj_table, unsigned int n)
+{
+ unsigned int index;
+ uint64_t obj;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool *mp;
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+#endif
+
+ for (index = 0; index < n; index++, obj_table++) {
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mbuf *m;
+ int retry = 4;
+
+ /* Retry few times before failing */
+ do {
+ obj = roc_npa_aura_op_alloc(hp->pool_id, 0);
+ } while (retry-- && (obj == 0));
+
+ if (obj == 0) {
+ cn10k_hwpool_enq(hp, obj_table - index, index);
+ return -ENOENT;
+ }
+ /* Update mempool information in the mbuf */
+ hdr = rte_mempool_get_header(PLT_PTR_CAST(obj));
+ m = PLT_PTR_CAST(obj);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (hdr->mp != m->pool || hdr->mp != mp)
+ plt_err("Pool Header Mismatch");
+#endif
+ m->pool = hp;
+ hdr->mp = hp;
+ *obj_table = (void *)obj;
+ }
+
+ return 0;
+}
+
+static unsigned int
+cn10k_hwpool_get_count(const struct rte_mempool *hp)
+{
+ return (unsigned int)roc_npa_aura_op_available(hp->pool_id);
+}
+
+static int
+cn10k_hwpool_alloc(struct rte_mempool *hp)
+{
+ uint64_t aura_handle = 0;
+ struct rte_mempool *mp;
+ uint32_t pool_id;
+ int rc;
+
+ if (hp->cache_size) {
+ plt_err("Hwpool does not support cache");
+ return -EINVAL;
+ }
+
+ if (CNXK_MEMPOOL_FLAGS(hp)) {
+ plt_err("Flags must not be passed to hwpool ops");
+ return -EINVAL;
+ }
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ if (!mp) {
+ plt_err("Invalid rte_mempool passed as pool_config");
+ return -EINVAL;
+ }
+ if (mp->cache_size) {
+ plt_err("Hwpool does not support attaching to pool with cache");
+ return -EINVAL;
+ }
+
+ if (hp->elt_size != mp->elt_size ||
+ hp->header_size != mp->header_size ||
+ hp->trailer_size != mp->trailer_size || hp->size != mp->size) {
+ plt_err("Hwpool parameters matching with master pool");
+ return -EINVAL;
+ }
+
+ /* Create the NPA aura */
+ pool_id = roc_npa_aura_handle_to_aura(mp->pool_id);
+ rc = roc_npa_aura_create(&aura_handle, hp->size, NULL, (int)pool_id, 0);
+ if (rc) {
+ plt_err("Failed to create aura rc=%d", rc);
+ return rc;
+ }
+
+ /* Set the flags for the hardware pool */
+ CNXK_MEMPOOL_SET_FLAGS(hp, CNXK_MEMPOOL_F_IS_HWPOOL);
+ hp->pool_id = aura_handle;
+ plt_npa_dbg("aura_handle=0x%" PRIx64, aura_handle);
+
+ return 0;
+}
+
+static void
+cn10k_hwpool_free(struct rte_mempool *hp)
+{
+ int rc = 0;
+
+ plt_npa_dbg("aura_handle=0x%" PRIx64, hp->pool_id);
+ /* It can happen that rte_mempool_free() is called immediately after
+ * rte_mempool_create_empty(). In such cases the NPA pool will not be
+ * allocated.
+ */
+ if (roc_npa_aura_handle_to_base(hp->pool_id) == 0)
+ return;
+
+ rc = roc_npa_aura_destroy(hp->pool_id);
+ if (rc)
+ plt_err("Failed to destroy aura rc=%d", rc);
+}
+
+static ssize_t
+cn10k_hwpool_calc_mem_size(const struct rte_mempool *hp, uint32_t obj_num,
+ uint32_t pg_shift, size_t *min_chunk_size,
+ size_t *align)
+{
+ RTE_SET_USED(hp);
+ RTE_SET_USED(obj_num);
+ RTE_SET_USED(pg_shift);
+ *min_chunk_size = CN10K_HWPOOL_MEM_SIZE;
+ *align = CN10K_HWPOOL_MEM_SIZE;
+ /* Return a minimum mem size so that hwpool can also be initialized just
+ * like a regular pool. This memzone will not be used anywhere.
+ */
+ return CN10K_HWPOOL_MEM_SIZE;
+}
+
+static int
+cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ uint64_t start_iova, end_iova;
+ struct rte_mempool *mp;
+
+ RTE_SET_USED(max_objs);
+ RTE_SET_USED(vaddr);
+ RTE_SET_USED(iova);
+ RTE_SET_USED(len);
+ RTE_SET_USED(obj_cb);
+ RTE_SET_USED(obj_cb_arg);
+ /* HW pools does not require populating anything as these pools are
+ * only associated with NPA aura. The NPA pool being used is that of
+ * another rte_mempool. Only copy the iova range from the aura of
+ * the other rte_mempool to this pool's aura.
+ */
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ roc_npa_aura_op_range_get(mp->pool_id, &start_iova, &end_iova);
+ roc_npa_aura_op_range_set(hp->pool_id, start_iova, end_iova);
+
+ return hp->size;
+}
+
+static struct rte_mempool_ops cn10k_hwpool_ops = {
+ .name = "cn10k_hwpool_ops",
+ .alloc = cn10k_hwpool_alloc,
+ .free = cn10k_hwpool_free,
+ .enqueue = cn10k_hwpool_enq,
+ .dequeue = cn10k_hwpool_deq,
+ .get_count = cn10k_hwpool_get_count,
+ .calc_mem_size = cn10k_hwpool_calc_mem_size,
+ .populate = cn10k_hwpool_populate,
+};
+
+RTE_MEMPOOL_REGISTER_OPS(cn10k_hwpool_ops);
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index fc2e4b5b70..4ca05d53e1 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -16,6 +16,10 @@ enum cnxk_mempool_flags {
* as pool config to create the pool.
*/
CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+ /* This flag indicates whether the pool is a hardware pool or not.
+ * This flag is set by the driver.
+ */
+ CNXK_MEMPOOL_F_IS_HWPOOL = RTE_BIT64(2),
};
#define CNXK_MEMPOOL_F_MASK 0xFUL
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
index 50856ecde8..ce152bedd2 100644
--- a/drivers/mempool/cnxk/meson.build
+++ b/drivers/mempool/cnxk/meson.build
@@ -14,6 +14,7 @@ sources = files(
'cnxk_mempool_telemetry.c',
'cn9k_mempool_ops.c',
'cn10k_mempool_ops.c',
+ 'cn10k_hwpool_ops.c',
)
deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH 5/5] mempool/cnxk: add support for exchanging mbufs between pools
2023-04-11 7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
` (3 preceding siblings ...)
2023-04-11 7:55 ` [PATCH 4/5] mempool/cnxk: add hwpool ops Ashwin Sekhar T K
@ 2023-04-11 7:55 ` Ashwin Sekhar T K
2023-05-17 18:46 ` Jerin Jacob
2023-05-23 9:04 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
` (2 subsequent siblings)
7 siblings, 1 reply; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-04-11 7:55 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, skori, skoteshwar, kirankumark, psatheesh, anoobj,
gakhil, hkalra, ndabilpuram
Add the following cnxk mempool PMD APIs to facilitate exchanging mbufs
between pools.
* rte_pmd_cnxk_mempool_is_hwpool() - Allows user to check whether a pool
is hwpool or not.
* rte_pmd_cnxk_mempool_range_check_disable() - Disables range checking on
any rte_mempool.
* rte_pmd_cnxk_mempool_mbuf_exchange() - Exchanges mbufs between any two
rte_mempools where the range check is disabled.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/mempool/cnxk/cn10k_hwpool_ops.c | 63 ++++++++++++++++++++-
drivers/mempool/cnxk/cnxk_mempool.h | 4 ++
drivers/mempool/cnxk/meson.build | 1 +
drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h | 56 ++++++++++++++++++
drivers/mempool/cnxk/version.map | 10 ++++
5 files changed, 133 insertions(+), 1 deletion(-)
create mode 100644 drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
create mode 100644 drivers/mempool/cnxk/version.map
diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
index 9238765155..b234481ec1 100644
--- a/drivers/mempool/cnxk/cn10k_hwpool_ops.c
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -3,11 +3,14 @@
*/
#include <rte_mempool.h>
+#include <rte_pmd_cnxk_mempool.h>
#include "roc_api.h"
#include "cnxk_mempool.h"
-#define CN10K_HWPOOL_MEM_SIZE 128
+#define CN10K_HWPOOL_MEM_SIZE 128
+#define CN10K_NPA_IOVA_RANGE_MIN 0x0
+#define CN10K_NPA_IOVA_RANGE_MAX 0x1fffffffffff80
static int __rte_hot
cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int n)
@@ -197,6 +200,64 @@ cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int max_objs,
return hp->size;
}
+int
+rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1, struct rte_mbuf *m2)
+{
+ struct rte_mempool_objhdr *hdr;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (!(CNXK_MEMPOOL_FLAGS(m1->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK) ||
+ !(CNXK_MEMPOOL_FLAGS(m2->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)) {
+ plt_err("Pools must have range check disabled");
+ return -EINVAL;
+ }
+ if (m1->pool->elt_size != m2->pool->elt_size ||
+ m1->pool->header_size != m2->pool->header_size ||
+ m1->pool->trailer_size != m2->pool->trailer_size ||
+ m1->pool->size != m2->pool->size) {
+ plt_err("Parameters of pools involved in exchange does not match");
+ return -EINVAL;
+ }
+#endif
+ RTE_SWAP(m1->pool, m2->pool);
+ hdr = rte_mempool_get_header(m1);
+ hdr->mp = m1->pool;
+ hdr = rte_mempool_get_header(m2);
+ hdr->mp = m2->pool;
+ return 0;
+}
+
+int
+rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp)
+{
+ return !!(CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_IS_HWPOOL);
+}
+
+int
+rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp)
+{
+ if (rte_pmd_cnxk_mempool_is_hwpool(mp)) {
+ /* Disable only aura range check for hardware pools */
+ roc_npa_aura_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
+ CN10K_NPA_IOVA_RANGE_MAX);
+ CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
+ mp = CNXK_MEMPOOL_CONFIG(mp);
+ }
+
+ /* No need to disable again if already disabled */
+ if (CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)
+ return 0;
+
+ /* Disable aura/pool range check */
+ roc_npa_pool_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
+ CN10K_NPA_IOVA_RANGE_MAX);
+ if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
+ return -EBUSY;
+
+ CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
+ return 0;
+}
+
static struct rte_mempool_ops cn10k_hwpool_ops = {
.name = "cn10k_hwpool_ops",
.alloc = cn10k_hwpool_alloc,
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index 4ca05d53e1..669e617952 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -20,6 +20,10 @@ enum cnxk_mempool_flags {
* This flag is set by the driver.
*/
CNXK_MEMPOOL_F_IS_HWPOOL = RTE_BIT64(2),
+ /* This flag indicates whether range check has been disabled for
+ * the pool. This flag is set by the driver.
+ */
+ CNXK_MEMPOOL_F_NO_RANGE_CHECK = RTE_BIT64(3),
};
#define CNXK_MEMPOOL_F_MASK 0xFUL
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
index ce152bedd2..e388cce26a 100644
--- a/drivers/mempool/cnxk/meson.build
+++ b/drivers/mempool/cnxk/meson.build
@@ -17,5 +17,6 @@ sources = files(
'cn10k_hwpool_ops.c',
)
+headers = files('rte_pmd_cnxk_mempool.h')
deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
require_iova_in_mbuf = false
diff --git a/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h b/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
new file mode 100644
index 0000000000..b040d0414f
--- /dev/null
+++ b/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+/**
+ * @file rte_pmd_cnxk_mempool.h
+ * Marvell CNXK Mempool PMD specific functions.
+ *
+ **/
+
+#ifndef _PMD_CNXK_MEMPOOL_H_
+#define _PMD_CNXK_MEMPOOL_H_
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+/**
+ * Exchange mbufs between two mempools.
+ *
+ * @param m1
+ * First mbuf
+ * @param m2
+ * Second mbuf
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1,
+ struct rte_mbuf *m2);
+
+/**
+ * Check whether a mempool is a hwpool.
+ *
+ * @param mp
+ * Mempool to check.
+ *
+ * @return
+ * 1 if mp is a hwpool, 0 otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp);
+
+/**
+ * Disable buffer address range check on a mempool.
+ *
+ * @param mp
+ * Mempool to disable range check on.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp);
+
+#endif /* _PMD_CNXK_MEMPOOL_H_ */
diff --git a/drivers/mempool/cnxk/version.map b/drivers/mempool/cnxk/version.map
new file mode 100644
index 0000000000..755731e3b5
--- /dev/null
+++ b/drivers/mempool/cnxk/version.map
@@ -0,0 +1,10 @@
+ DPDK_23 {
+ local: *;
+ };
+
+ EXPERIMENTAL {
+ global:
+ rte_pmd_cnxk_mempool_is_hwpool;
+ rte_pmd_cnxk_mempool_mbuf_exchange;
+ rte_pmd_cnxk_mempool_range_check_disable;
+ };
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH 5/5] mempool/cnxk: add support for exchanging mbufs between pools
2023-04-11 7:55 ` [PATCH 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
@ 2023-05-17 18:46 ` Jerin Jacob
0 siblings, 0 replies; 22+ messages in thread
From: Jerin Jacob @ 2023-05-17 18:46 UTC (permalink / raw)
To: Ashwin Sekhar T K
Cc: dev, Pavan Nikhilesh, jerinj, skori, skoteshwar, kirankumark,
psatheesh, anoobj, gakhil, hkalra, ndabilpuram
On Tue, Apr 11, 2023 at 1:26 PM Ashwin Sekhar T K <asekhar@marvell.com> wrote:
>
> Add the following cnxk mempool PMD APIs to facilitate exchanging mbufs
> between pools.
> * rte_pmd_cnxk_mempool_is_hwpool() - Allows user to check whether a pool
> is hwpool or not.
> * rte_pmd_cnxk_mempool_range_check_disable() - Disables range checking on
> any rte_mempool.
> * rte_pmd_cnxk_mempool_mbuf_exchange() - Exchanges mbufs between any two
> rte_mempool where the range check is disabled.
>
> Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
> ---
> drivers/mempool/cnxk/cn10k_hwpool_ops.c | 63 ++++++++++++++++++++-
> drivers/mempool/cnxk/cnxk_mempool.h | 4 ++
> drivers/mempool/cnxk/meson.build | 1 +
> drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h | 56 ++++++++++++++++++
Update to doc/api/doxy-api-index.md is missing.
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags
2023-04-11 7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
` (4 preceding siblings ...)
2023-04-11 7:55 ` [PATCH 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
@ 2023-05-23 9:04 ` Ashwin Sekhar T K
2023-05-23 9:04 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
` (3 more replies)
2023-05-23 9:13 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
7 siblings, 4 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:04 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh, Nithin Dabilpuram,
Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Use lower bits of pool_config to pass flags specific to
cnxk mempool PMD ops.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/mempool/cnxk/cnxk_mempool.h | 24 ++++++++++++++++++++++++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 17 ++++++++++-------
drivers/net/cnxk/cnxk_ethdev_sec.c | 25 ++++++-------------------
3 files changed, 40 insertions(+), 26 deletions(-)
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index 3405aa7663..fc2e4b5b70 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -7,6 +7,30 @@
#include <rte_mempool.h>
+enum cnxk_mempool_flags {
+ /* This flag is used to ensure that only aura zero is allocated.
+ * If aura zero is not available, then mempool creation fails.
+ */
+ CNXK_MEMPOOL_F_ZERO_AURA = RTE_BIT64(0),
+ /* Here the pool create will use the npa_aura_s structure passed
+ * as pool config to create the pool.
+ */
+ CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+};
+
+#define CNXK_MEMPOOL_F_MASK 0xFUL
+
+#define CNXK_MEMPOOL_FLAGS(_m) \
+ (PLT_U64_CAST((_m)->pool_config) & CNXK_MEMPOOL_F_MASK)
+#define CNXK_MEMPOOL_CONFIG(_m) \
+ (PLT_PTR_CAST(PLT_U64_CAST((_m)->pool_config) & ~CNXK_MEMPOOL_F_MASK))
+#define CNXK_MEMPOOL_SET_FLAGS(_m, _f) \
+ do { \
+ void *_c = CNXK_MEMPOOL_CONFIG(_m); \
+ uint64_t _flags = CNXK_MEMPOOL_FLAGS(_m) | (_f); \
+ (_m)->pool_config = PLT_PTR_CAST(PLT_U64_CAST(_c) | _flags); \
+ } while (0)
+
unsigned int cnxk_mempool_get_count(const struct rte_mempool *mp);
ssize_t cnxk_mempool_calc_mem_size(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 3769afd3d1..1b6c4591bb 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,7 +72,7 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
int
cnxk_mempool_alloc(struct rte_mempool *mp)
{
- uint32_t block_count, flags = 0;
+ uint32_t block_count, flags, roc_flags = 0;
uint64_t aura_handle = 0;
struct npa_aura_s aura;
struct npa_pool_s pool;
@@ -96,15 +96,18 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
pool.nat_align = 1;
pool.buf_offset = mp->header_size / ROC_ALIGN;
- /* Use driver specific mp->pool_config to override aura config */
- if (mp->pool_config != NULL)
- memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
+ flags = CNXK_MEMPOOL_FLAGS(mp);
+ if (flags & CNXK_MEMPOOL_F_ZERO_AURA) {
+ roc_flags = ROC_NPA_ZERO_AURA_F;
+ } else if (flags & CNXK_MEMPOOL_F_CUSTOM_AURA) {
+ struct npa_aura_s *paura;
- if (aura.ena && aura.pool_addr == 0)
- flags = ROC_NPA_ZERO_AURA_F;
+ paura = CNXK_MEMPOOL_CONFIG(mp);
+ memcpy(&aura, paura, sizeof(struct npa_aura_s));
+ }
rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
- &pool, flags);
+ &pool, roc_flags);
if (rc) {
plt_err("Failed to alloc pool or aura rc=%d", rc);
goto error;
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index aa8a378a00..cd64daacc0 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -3,6 +3,7 @@
*/
#include <cnxk_ethdev.h>
+#include <cnxk_mempool.h>
#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
@@ -43,7 +44,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
{
const char *mp_name = NULL;
struct rte_pktmbuf_pool_private mbp_priv;
- struct npa_aura_s *aura;
struct rte_mempool *mp;
uint16_t first_skip;
int rc;
@@ -65,7 +65,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
return -EINVAL;
}
- plt_free(mp->pool_config);
rte_mempool_free(mp);
*aura_handle = 0;
@@ -84,22 +83,12 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
return -EIO;
}
- /* Indicate to allocate zero aura */
- aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
- if (!aura) {
- rc = -ENOMEM;
- goto free_mp;
- }
- aura->ena = 1;
- if (!mempool_name)
- aura->pool_addr = 0;
- else
- aura->pool_addr = 1; /* Any non zero value, so that alloc from next free Index */
-
- rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(), aura);
+ rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+ mempool_name ?
+ NULL : PLT_PTR_CAST(CNXK_MEMPOOL_F_ZERO_AURA));
if (rc) {
plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
- goto free_aura;
+ goto free_mp;
}
/* Init mempool private area */
@@ -113,15 +102,13 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
rc = rte_mempool_populate_default(mp);
if (rc < 0) {
plt_err("Failed to create inline meta pool, rc=%d", rc);
- goto free_aura;
+ goto free_mp;
}
rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
*aura_handle = mp->pool_id;
*mpool = (uintptr_t)mp;
return 0;
-free_aura:
- plt_free(aura);
free_mp:
rte_mempool_free(mp);
return rc;
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs
2023-05-23 9:04 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
@ 2023-05-23 9:04 ` Ashwin Sekhar T K
2023-05-23 9:04 ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
` (2 subsequent siblings)
3 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:04 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: jerinj, pbhagavatula, psatheesh, asekhar, anoobj, gakhil, hkalra
Add ROC APIs which allow creating NPA auras independently and
attaching them to an existing NPA pool. Also add an API to destroy
NPA auras independently.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_npa.c | 219 ++++++++++++++++++++++++++++++++
drivers/common/cnxk/roc_npa.h | 4 +
drivers/common/cnxk/version.map | 2 +
3 files changed, 225 insertions(+)
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 20637fbf65..e3c925ddd1 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -85,6 +85,36 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura
return rc;
}
+static int
+npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
+{
+ struct npa_aq_enq_req *aura_init_req;
+ struct npa_aq_enq_rsp *aura_init_rsp;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ mbox = mbox_get(m_box);
+ aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (aura_init_req == NULL)
+ goto exit;
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_INIT;
+ mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+ rc = mbox_process_msg(mbox, (void **)&aura_init_rsp);
+ if (rc < 0)
+ goto exit;
+
+ if (aura_init_rsp->hdr.rc == 0)
+ rc = 0;
+ else
+ rc = NPA_ERR_AURA_POOL_INIT;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
static int
npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
{
@@ -156,6 +186,54 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
return rc;
}
+static int
+npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
+{
+ struct npa_aq_enq_req *aura_req;
+ struct npa_aq_enq_rsp *aura_rsp;
+ struct ndc_sync_op *ndc_req;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ /* Procedure for disabling an aura */
+ plt_delay_us(10);
+
+ mbox = mbox_get(m_box);
+ aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (aura_req == NULL)
+ goto exit;
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+ aura_req->aura.ena = 0;
+ aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+ rc = mbox_process_msg(mbox, (void **)&aura_rsp);
+ if (rc < 0)
+ goto exit;
+
+ if (aura_rsp->hdr.rc != 0)
+ return NPA_ERR_AURA_POOL_FINI;
+
+ /* Sync NDC-NPA for LF */
+ ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+ if (ndc_req == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+ ndc_req->npa_lf_sync = 1;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+ rc = NPA_ERR_AURA_POOL_FINI;
+ goto exit;
+ }
+ rc = 0;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
int
roc_npa_pool_op_pc_reset(uint64_t aura_handle)
{
@@ -493,6 +571,108 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
return rc;
}
+static int
+npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id,
+ struct npa_aura_s *aura, uint64_t *aura_handle, uint32_t flags)
+{
+ int rc, aura_id;
+
+ /* Sanity check */
+ if (!lf || !aura || !aura_handle)
+ return NPA_ERR_PARAM;
+
+ roc_npa_dev_lock();
+ /* Get aura_id from resource bitmap */
+ aura_id = find_free_aura(lf, flags);
+ if (aura_id < 0) {
+ roc_npa_dev_unlock();
+ return NPA_ERR_AURA_ID_ALLOC;
+ }
+
+ /* Mark aura as reserved */
+ plt_bitmap_clear(lf->npa_bmp, aura_id);
+
+ roc_npa_dev_unlock();
+ rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
+ aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
+ NPA_ERR_AURA_ID_ALLOC :
+ 0;
+ if (rc)
+ goto exit;
+
+ /* Update aura fields */
+ aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
+ aura->ena = 1;
+ aura->shift = plt_log2_u32(block_count);
+ aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
+ aura->limit = block_count;
+ aura->pool_caching = 1;
+ aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+ aura->avg_con = 0;
+ /* Many to one reduction */
+ aura->err_qint_idx = aura_id % lf->qints;
+
+ /* Issue AURA_INIT op */
+ rc = npa_aura_init(lf->mbox, aura_id, aura);
+ if (rc)
+ return rc;
+
+ *aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);
+
+ return 0;
+
+exit:
+ return rc;
+}
+
+int
+roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
+ struct npa_aura_s *aura, int pool_id, uint32_t flags)
+{
+ struct npa_aura_s defaura;
+ struct idev_cfg *idev;
+ struct npa_lf *lf;
+ int rc;
+
+ lf = idev_npa_obj_get();
+ if (lf == NULL) {
+ rc = NPA_ERR_DEVICE_NOT_BOUNDED;
+ goto error;
+ }
+
+ idev = idev_get_cfg();
+ if (idev == NULL) {
+ rc = NPA_ERR_ALLOC;
+ goto error;
+ }
+
+ if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
+ rc = NPA_ERR_ALLOC;
+ goto error;
+ }
+
+ if (aura == NULL) {
+ memset(&defaura, 0, sizeof(struct npa_aura_s));
+ aura = &defaura;
+ }
+
+ rc = npa_aura_alloc(lf, block_count, pool_id, aura, aura_handle, flags);
+ if (rc) {
+ plt_err("Failed to alloc aura rc=%d", rc);
+ goto error;
+ }
+
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, *aura_handle);
+
+ /* Just hold the reference of the object */
+ __atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
+error:
+ return rc;
+}
+
int
roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
{
@@ -561,6 +741,45 @@ roc_npa_pool_destroy(uint64_t aura_handle)
return rc;
}
+static int
+npa_aura_free(struct npa_lf *lf, uint64_t aura_handle)
+{
+ int aura_id, rc;
+
+ if (!lf || !aura_handle)
+ return NPA_ERR_PARAM;
+
+ aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ rc = npa_aura_fini(lf->mbox, aura_id);
+
+ if (rc)
+ return rc;
+
+ memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));
+
+ roc_npa_dev_lock();
+ plt_bitmap_set(lf->npa_bmp, aura_id);
+ roc_npa_dev_unlock();
+
+ return rc;
+}
+
+int
+roc_npa_aura_destroy(uint64_t aura_handle)
+{
+ struct npa_lf *lf = idev_npa_obj_get();
+ int rc = 0;
+
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
+ rc = npa_aura_free(lf, aura_handle);
+ if (rc)
+ plt_err("Failed to destroy aura rc=%d", rc);
+
+ /* Release the reference of npa */
+ rc |= npa_lf_fini();
+ return rc;
+}
+
int
roc_npa_pool_range_update_check(uint64_t aura_handle)
{
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index dd588b0322..df15dabe92 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,10 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
+ struct npa_aura_s *aura, int pool_id,
+ uint32_t flags);
+int __roc_api roc_npa_aura_destroy(uint64_t aura_handle);
uint64_t __roc_api roc_npa_zero_aura_handle(void);
int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index b298a21b84..9414b55e9c 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -347,6 +347,8 @@ INTERNAL {
roc_nix_vlan_mcam_entry_write;
roc_nix_vlan_strip_vtag_ena_dis;
roc_nix_vlan_tpid_set;
+ roc_npa_aura_create;
+ roc_npa_aura_destroy;
roc_npa_buf_type_mask;
roc_npa_buf_type_limit_get;
roc_npa_buf_type_update;
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs
2023-05-23 9:04 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23 9:04 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
@ 2023-05-23 9:04 ` Ashwin Sekhar T K
2023-05-23 9:04 ` [PATCH v2 4/5] mempool/cnxk: add hwpool ops Ashwin Sekhar T K
2023-05-23 9:04 ` [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
3 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:04 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Current APIs to set range on auras modify both the
aura range limits in software and pool range limits
in NPA hardware.
Newly added ROC APIs allow to set/get aura range limits
in software alone without modifying hardware.
The existing aura range set functionality has been moved
as a pool range set API.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_nix_queue.c | 2 +-
drivers/common/cnxk/roc_npa.c | 35 ++++++++++++++++++++++++-
drivers/common/cnxk/roc_npa.h | 6 +++++
drivers/common/cnxk/roc_sso.c | 2 +-
drivers/common/cnxk/version.map | 2 ++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 2 +-
6 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 21bfe7d498..ac4d9856c1 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1050,7 +1050,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
goto npa_fail;
}
- roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
+ roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
sq->aura_sqb_bufs = nb_sqb_bufs;
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index e3c925ddd1..3b0f95a304 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -18,7 +18,7 @@ roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
}
void
-roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova,
uint64_t end_iova)
{
const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
@@ -32,6 +32,7 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
PLT_ASSERT(lf);
lim = lf->aura_lim;
+ /* Change the range bookkeeping in software as well as in hardware */
lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
@@ -39,6 +40,38 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
roc_store_pair(lim[reg].ptr_end, reg, end);
}
+void
+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+ uint64_t end_iova)
+{
+ uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aura_lim *lim;
+
+ PLT_ASSERT(lf);
+ lim = lf->aura_lim;
+
+ /* Change only the bookkeeping in software */
+ lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
+ lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
+}
+
+void
+roc_npa_aura_op_range_get(uint64_t aura_handle, uint64_t *start_iova,
+ uint64_t *end_iova)
+{
+ uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_aura_lim *lim;
+ struct npa_lf *lf;
+
+ lf = idev_npa_obj_get();
+ PLT_ASSERT(lf);
+
+ lim = lf->aura_lim;
+ *start_iova = lim[aura_id].ptr_start;
+ *end_iova = lim[aura_id].ptr_end;
+}
+
static int
npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura,
struct npa_pool_s *pool)
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index df15dabe92..21608a40d9 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,12 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+void __roc_api roc_npa_aura_op_range_get(uint64_t aura_handle,
+ uint64_t *start_iova,
+ uint64_t *end_iova);
+void __roc_api roc_npa_pool_op_range_set(uint64_t aura_handle,
+ uint64_t start_iova,
+ uint64_t end_iova);
int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
struct npa_aura_s *aura, int pool_id,
uint32_t flags);
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 4a6a5080f7..c376bd837f 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -523,7 +523,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
iova += xaq_buf_size;
}
- roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+ roc_npa_pool_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
xaq->nb_xaq) {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 9414b55e9c..5281c71550 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -354,6 +354,7 @@ INTERNAL {
roc_npa_buf_type_update;
roc_npa_aura_drop_set;
roc_npa_aura_limit_modify;
+ roc_npa_aura_op_range_get;
roc_npa_aura_op_range_set;
roc_npa_ctx_dump;
roc_npa_dev_fini;
@@ -365,6 +366,7 @@ INTERNAL {
roc_npa_pool_create;
roc_npa_pool_destroy;
roc_npa_pool_op_pc_reset;
+ roc_npa_pool_op_range_set;
roc_npa_pool_range_update_check;
roc_npa_zero_aura_handle;
roc_npc_fini;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 1b6c4591bb..a1aeaee746 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -174,7 +174,7 @@ cnxk_mempool_populate(struct rte_mempool *mp, unsigned int max_objs,
plt_npa_dbg("requested objects %" PRIu64 ", possible objects %" PRIu64
"", (uint64_t)max_objs, (uint64_t)num_elts);
- roc_npa_aura_op_range_set(mp->pool_id, iova,
+ roc_npa_pool_op_range_set(mp->pool_id, iova,
iova + num_elts * total_elt_sz);
if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 4/5] mempool/cnxk: add hwpool ops
2023-05-23 9:04 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23 9:04 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
2023-05-23 9:04 ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
@ 2023-05-23 9:04 ` Ashwin Sekhar T K
2023-05-23 9:04 ` [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
3 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:04 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, skori, skoteshwar, kirankumark, psatheesh, anoobj,
gakhil, hkalra, ndabilpuram
Add hwpool ops which can be used to create a rte_mempool that attaches
to another rte_mempool. The hwpool will not have its own buffers and
will have a dummy populate callback. Only an NPA aura will be allocated
for this rte_mempool. The buffers will be allocated from the NPA pool
of the attached rte_mempool.
Only mbuf objects are supported in hwpool. Generic objects are not
supported. Note that this pool will not have any range check enabled.
So user will be able to free any pointer into this pool. HW will not
throw error interrupts if invalid buffers are passed. So user must be
careful when using this pool.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/mempool/cnxk/cn10k_hwpool_ops.c | 211 ++++++++++++++++++++++++
drivers/mempool/cnxk/cnxk_mempool.h | 4 +
drivers/mempool/cnxk/meson.build | 1 +
3 files changed, 216 insertions(+)
create mode 100644 drivers/mempool/cnxk/cn10k_hwpool_ops.c
diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
new file mode 100644
index 0000000000..9238765155
--- /dev/null
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include <rte_mempool.h>
+
+#include "roc_api.h"
+#include "cnxk_mempool.h"
+
+#define CN10K_HWPOOL_MEM_SIZE 128
+
+static int __rte_hot
+cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int n)
+{
+ struct rte_mempool *mp;
+ unsigned int index;
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ /* Ensure mbuf init changes are written before the free pointers
+ * are enqueued to the stack.
+ */
+ rte_io_wmb();
+ for (index = 0; index < n; index++) {
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mbuf *m;
+
+ m = PLT_PTR_CAST(obj_table[index]);
+ /* Update mempool information in the mbuf */
+ hdr = rte_mempool_get_header(obj_table[index]);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (hdr->mp != m->pool || hdr->mp != hp)
+ plt_err("Pool Header Mismatch");
+#endif
+ m->pool = mp;
+ hdr->mp = mp;
+ roc_npa_aura_op_free(hp->pool_id, 0,
+ (uint64_t)obj_table[index]);
+ }
+
+ return 0;
+}
+
+static int __rte_hot
+cn10k_hwpool_deq(struct rte_mempool *hp, void **obj_table, unsigned int n)
+{
+ unsigned int index;
+ uint64_t obj;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool *mp;
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+#endif
+
+ for (index = 0; index < n; index++, obj_table++) {
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mbuf *m;
+ int retry = 4;
+
+ /* Retry few times before failing */
+ do {
+ obj = roc_npa_aura_op_alloc(hp->pool_id, 0);
+ } while (retry-- && (obj == 0));
+
+ if (obj == 0) {
+ cn10k_hwpool_enq(hp, obj_table - index, index);
+ return -ENOENT;
+ }
+ /* Update mempool information in the mbuf */
+ hdr = rte_mempool_get_header(PLT_PTR_CAST(obj));
+ m = PLT_PTR_CAST(obj);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (hdr->mp != m->pool || hdr->mp != mp)
+ plt_err("Pool Header Mismatch");
+#endif
+ m->pool = hp;
+ hdr->mp = hp;
+ *obj_table = (void *)obj;
+ }
+
+ return 0;
+}
+
+static unsigned int
+cn10k_hwpool_get_count(const struct rte_mempool *hp)
+{
+ return (unsigned int)roc_npa_aura_op_available(hp->pool_id);
+}
+
+static int
+cn10k_hwpool_alloc(struct rte_mempool *hp)
+{
+ uint64_t aura_handle = 0;
+ struct rte_mempool *mp;
+ uint32_t pool_id;
+ int rc;
+
+ if (hp->cache_size) {
+ plt_err("Hwpool does not support cache");
+ return -EINVAL;
+ }
+
+ if (CNXK_MEMPOOL_FLAGS(hp)) {
+ plt_err("Flags must not be passed to hwpool ops");
+ return -EINVAL;
+ }
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ if (!mp) {
+ plt_err("Invalid rte_mempool passed as pool_config");
+ return -EINVAL;
+ }
+ if (mp->cache_size) {
+ plt_err("Hwpool does not support attaching to pool with cache");
+ return -EINVAL;
+ }
+
+ if (hp->elt_size != mp->elt_size ||
+ hp->header_size != mp->header_size ||
+ hp->trailer_size != mp->trailer_size || hp->size != mp->size) {
+ plt_err("Hwpool parameters matching with master pool");
+ return -EINVAL;
+ }
+
+ /* Create the NPA aura */
+ pool_id = roc_npa_aura_handle_to_aura(mp->pool_id);
+ rc = roc_npa_aura_create(&aura_handle, hp->size, NULL, (int)pool_id, 0);
+ if (rc) {
+ plt_err("Failed to create aura rc=%d", rc);
+ return rc;
+ }
+
+ /* Set the flags for the hardware pool */
+ CNXK_MEMPOOL_SET_FLAGS(hp, CNXK_MEMPOOL_F_IS_HWPOOL);
+ hp->pool_id = aura_handle;
+ plt_npa_dbg("aura_handle=0x%" PRIx64, aura_handle);
+
+ return 0;
+}
+
+static void
+cn10k_hwpool_free(struct rte_mempool *hp)
+{
+ int rc = 0;
+
+ plt_npa_dbg("aura_handle=0x%" PRIx64, hp->pool_id);
+ /* It can happen that rte_mempool_free() is called immediately after
+ * rte_mempool_create_empty(). In such cases the NPA pool will not be
+ * allocated.
+ */
+ if (roc_npa_aura_handle_to_base(hp->pool_id) == 0)
+ return;
+
+ rc = roc_npa_aura_destroy(hp->pool_id);
+ if (rc)
+ plt_err("Failed to destroy aura rc=%d", rc);
+}
+
+static ssize_t
+cn10k_hwpool_calc_mem_size(const struct rte_mempool *hp, uint32_t obj_num,
+ uint32_t pg_shift, size_t *min_chunk_size,
+ size_t *align)
+{
+ RTE_SET_USED(hp);
+ RTE_SET_USED(obj_num);
+ RTE_SET_USED(pg_shift);
+ *min_chunk_size = CN10K_HWPOOL_MEM_SIZE;
+ *align = CN10K_HWPOOL_MEM_SIZE;
+ /* Return a minimum mem size so that hwpool can also be initialized just
+ * like a regular pool. This memzone will not be used anywhere.
+ */
+ return CN10K_HWPOOL_MEM_SIZE;
+}
+
+static int
+cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ uint64_t start_iova, end_iova;
+ struct rte_mempool *mp;
+
+ RTE_SET_USED(max_objs);
+ RTE_SET_USED(vaddr);
+ RTE_SET_USED(iova);
+ RTE_SET_USED(len);
+ RTE_SET_USED(obj_cb);
+ RTE_SET_USED(obj_cb_arg);
+ /* HW pools does not require populating anything as these pools are
+ * only associated with NPA aura. The NPA pool being used is that of
+ * another rte_mempool. Only copy the iova range from the aura of
+ * the other rte_mempool to this pool's aura.
+ */
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ roc_npa_aura_op_range_get(mp->pool_id, &start_iova, &end_iova);
+ roc_npa_aura_op_range_set(hp->pool_id, start_iova, end_iova);
+
+ return hp->size;
+}
+
+static struct rte_mempool_ops cn10k_hwpool_ops = {
+ .name = "cn10k_hwpool_ops",
+ .alloc = cn10k_hwpool_alloc,
+ .free = cn10k_hwpool_free,
+ .enqueue = cn10k_hwpool_enq,
+ .dequeue = cn10k_hwpool_deq,
+ .get_count = cn10k_hwpool_get_count,
+ .calc_mem_size = cn10k_hwpool_calc_mem_size,
+ .populate = cn10k_hwpool_populate,
+};
+
+RTE_MEMPOOL_REGISTER_OPS(cn10k_hwpool_ops);
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index fc2e4b5b70..4ca05d53e1 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -16,6 +16,10 @@ enum cnxk_mempool_flags {
* as pool config to create the pool.
*/
CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+ /* This flag indicates whether the pool is a hardware pool or not.
+ * This flag is set by the driver.
+ */
+ CNXK_MEMPOOL_F_IS_HWPOOL = RTE_BIT64(2),
};
#define CNXK_MEMPOOL_F_MASK 0xFUL
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
index 50856ecde8..ce152bedd2 100644
--- a/drivers/mempool/cnxk/meson.build
+++ b/drivers/mempool/cnxk/meson.build
@@ -14,6 +14,7 @@ sources = files(
'cnxk_mempool_telemetry.c',
'cn9k_mempool_ops.c',
'cn10k_mempool_ops.c',
+ 'cn10k_hwpool_ops.c',
)
deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools
2023-05-23 9:04 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
` (2 preceding siblings ...)
2023-05-23 9:04 ` [PATCH v2 4/5] mempool/cnxk: add hwpool ops Ashwin Sekhar T K
@ 2023-05-23 9:04 ` Ashwin Sekhar T K
2023-05-24 9:33 ` Jerin Jacob
3 siblings, 1 reply; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:04 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, skori, skoteshwar, kirankumark, psatheesh, anoobj,
gakhil, hkalra, ndabilpuram
Add the following cnxk mempool PMD APIs to facilitate exchanging mbufs
between pools.
* rte_pmd_cnxk_mempool_is_hwpool() - Allows user to check whether a pool
is hwpool or not.
* rte_pmd_cnxk_mempool_range_check_disable() - Disables range checking on
any rte_mempool.
* rte_pmd_cnxk_mempool_mbuf_exchange() - Exchanges mbufs between any two
rte_mempool where the range check is disabled.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
doc/api/doxy-api-index.md | 1 +
doc/api/doxy-api.conf.in | 1 +
drivers/mempool/cnxk/cn10k_hwpool_ops.c | 63 ++++++++++++++++++++-
drivers/mempool/cnxk/cnxk_mempool.h | 4 ++
drivers/mempool/cnxk/meson.build | 1 +
drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h | 56 ++++++++++++++++++
drivers/mempool/cnxk/version.map | 10 ++++
7 files changed, 135 insertions(+), 1 deletion(-)
create mode 100644 drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
create mode 100644 drivers/mempool/cnxk/version.map
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index c709fd48ad..a781b8f408 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -49,6 +49,7 @@ The public API headers are grouped by topics:
[iavf](@ref rte_pmd_iavf.h),
[bnxt](@ref rte_pmd_bnxt.h),
[cnxk](@ref rte_pmd_cnxk.h),
+ [cnxk_mempool](@ref rte_pmd_cnxk_mempool.h),
[dpaa](@ref rte_pmd_dpaa.h),
[dpaa2](@ref rte_pmd_dpaa2.h),
[mlx5](@ref rte_pmd_mlx5.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index d230a19e1f..7e68e43c64 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -9,6 +9,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
@TOPDIR@/drivers/crypto/scheduler \
@TOPDIR@/drivers/dma/dpaa2 \
@TOPDIR@/drivers/event/dlb2 \
+ @TOPDIR@/drivers/mempool/cnxk \
@TOPDIR@/drivers/mempool/dpaa2 \
@TOPDIR@/drivers/net/ark \
@TOPDIR@/drivers/net/bnxt \
diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
index 9238765155..b234481ec1 100644
--- a/drivers/mempool/cnxk/cn10k_hwpool_ops.c
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -3,11 +3,14 @@
*/
#include <rte_mempool.h>
+#include <rte_pmd_cnxk_mempool.h>
#include "roc_api.h"
#include "cnxk_mempool.h"
-#define CN10K_HWPOOL_MEM_SIZE 128
+#define CN10K_HWPOOL_MEM_SIZE 128
+#define CN10K_NPA_IOVA_RANGE_MIN 0x0
+#define CN10K_NPA_IOVA_RANGE_MAX 0x1fffffffffff80
static int __rte_hot
cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int n)
@@ -197,6 +200,64 @@ cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int max_objs,
return hp->size;
}
+int
+rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1, struct rte_mbuf *m2)
+{
+ struct rte_mempool_objhdr *hdr;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (!(CNXK_MEMPOOL_FLAGS(m1->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK) ||
+ !(CNXK_MEMPOOL_FLAGS(m2->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)) {
+ plt_err("Pools must have range check disabled");
+ return -EINVAL;
+ }
+ if (m1->pool->elt_size != m2->pool->elt_size ||
+ m1->pool->header_size != m2->pool->header_size ||
+ m1->pool->trailer_size != m2->pool->trailer_size ||
+ m1->pool->size != m2->pool->size) {
+ plt_err("Parameters of pools involved in exchange does not match");
+ return -EINVAL;
+ }
+#endif
+ RTE_SWAP(m1->pool, m2->pool);
+ hdr = rte_mempool_get_header(m1);
+ hdr->mp = m1->pool;
+ hdr = rte_mempool_get_header(m2);
+ hdr->mp = m2->pool;
+ return 0;
+}
+
+int
+rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp)
+{
+ return !!(CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_IS_HWPOOL);
+}
+
+int
+rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp)
+{
+ if (rte_pmd_cnxk_mempool_is_hwpool(mp)) {
+ /* Disable only aura range check for hardware pools */
+ roc_npa_aura_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
+ CN10K_NPA_IOVA_RANGE_MAX);
+ CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
+ mp = CNXK_MEMPOOL_CONFIG(mp);
+ }
+
+ /* No need to disable again if already disabled */
+ if (CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)
+ return 0;
+
+ /* Disable aura/pool range check */
+ roc_npa_pool_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
+ CN10K_NPA_IOVA_RANGE_MAX);
+ if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
+ return -EBUSY;
+
+ CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
+ return 0;
+}
+
static struct rte_mempool_ops cn10k_hwpool_ops = {
.name = "cn10k_hwpool_ops",
.alloc = cn10k_hwpool_alloc,
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index 4ca05d53e1..669e617952 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -20,6 +20,10 @@ enum cnxk_mempool_flags {
* This flag is set by the driver.
*/
CNXK_MEMPOOL_F_IS_HWPOOL = RTE_BIT64(2),
+ /* This flag indicates whether range check has been disabled for
+ * the pool. This flag is set by the driver.
+ */
+ CNXK_MEMPOOL_F_NO_RANGE_CHECK = RTE_BIT64(3),
};
#define CNXK_MEMPOOL_F_MASK 0xFUL
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
index ce152bedd2..e388cce26a 100644
--- a/drivers/mempool/cnxk/meson.build
+++ b/drivers/mempool/cnxk/meson.build
@@ -17,5 +17,6 @@ sources = files(
'cn10k_hwpool_ops.c',
)
+headers = files('rte_pmd_cnxk_mempool.h')
deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
require_iova_in_mbuf = false
diff --git a/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h b/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
new file mode 100644
index 0000000000..ada6e7cd4d
--- /dev/null
+++ b/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+/**
+ * @file rte_pmd_cnxk_mempool.h
+ * Marvell CNXK Mempool PMD specific functions.
+ *
+ **/
+
+#ifndef _PMD_CNXK_MEMPOOL_H_
+#define _PMD_CNXK_MEMPOOL_H_
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+/**
+ * Exchange mbufs between two mempools.
+ *
+ * @param m1
+ * First mbuf
+ * @param m2
+ * Second mbuf
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1,
+ struct rte_mbuf *m2);
+
+/**
+ * Check whether a mempool is a hwpool.
+ *
+ * @param mp
+ * Mempool to check.
+ *
+ * @return
+ * 1 if mp is a hwpool, 0 otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp);
+
+/**
+ * Disable buffer address range check on a mempool.
+ *
+ * @param mp
+ * Mempool to disable range check on.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp);
+
+#endif /* _PMD_CNXK_MEMPOOL_H_ */
diff --git a/drivers/mempool/cnxk/version.map b/drivers/mempool/cnxk/version.map
new file mode 100644
index 0000000000..755731e3b5
--- /dev/null
+++ b/drivers/mempool/cnxk/version.map
@@ -0,0 +1,10 @@
+ DPDK_23 {
+ local: *;
+ };
+
+ EXPERIMENTAL {
+ global:
+ rte_pmd_cnxk_mempool_is_hwpool;
+ rte_pmd_cnxk_mempool_mbuf_exchange;
+ rte_pmd_cnxk_mempool_range_check_disable;
+ };
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools
2023-05-23 9:04 ` [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
@ 2023-05-24 9:33 ` Jerin Jacob
0 siblings, 0 replies; 22+ messages in thread
From: Jerin Jacob @ 2023-05-24 9:33 UTC (permalink / raw)
To: Ashwin Sekhar T K
Cc: dev, Pavan Nikhilesh, jerinj, skori, skoteshwar, kirankumark,
psatheesh, anoobj, gakhil, hkalra, ndabilpuram
On Tue, May 23, 2023 at 6:30 PM Ashwin Sekhar T K <asekhar@marvell.com> wrote:
>
> Add the following cnxk mempool PMD APIs to facilitate exchanging mbufs
> between pools.
> * rte_pmd_cnxk_mempool_is_hwpool() - Allows user to check whether a pool
> is hwpool or not.
> * rte_pmd_cnxk_mempool_range_check_disable() - Disables range checking on
> any rte_mempool.
> * rte_pmd_cnxk_mempool_mbuf_exchange() - Exchanges mbufs between any two
> rte_mempool where the range check is disabled.
>
> Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
Series applied to dpdk-next-net-mrvl/for-next-net. Thanks
> ---
> doc/api/doxy-api-index.md | 1 +
> doc/api/doxy-api.conf.in | 1 +
> drivers/mempool/cnxk/cn10k_hwpool_ops.c | 63 ++++++++++++++++++++-
> drivers/mempool/cnxk/cnxk_mempool.h | 4 ++
> drivers/mempool/cnxk/meson.build | 1 +
> drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h | 56 ++++++++++++++++++
> drivers/mempool/cnxk/version.map | 10 ++++
> 7 files changed, 135 insertions(+), 1 deletion(-)
> create mode 100644 drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
> create mode 100644 drivers/mempool/cnxk/version.map
>
> diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> index c709fd48ad..a781b8f408 100644
> --- a/doc/api/doxy-api-index.md
> +++ b/doc/api/doxy-api-index.md
> @@ -49,6 +49,7 @@ The public API headers are grouped by topics:
> [iavf](@ref rte_pmd_iavf.h),
> [bnxt](@ref rte_pmd_bnxt.h),
> [cnxk](@ref rte_pmd_cnxk.h),
> + [cnxk_mempool](@ref rte_pmd_cnxk_mempool.h),
> [dpaa](@ref rte_pmd_dpaa.h),
> [dpaa2](@ref rte_pmd_dpaa2.h),
> [mlx5](@ref rte_pmd_mlx5.h),
> diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
> index d230a19e1f..7e68e43c64 100644
> --- a/doc/api/doxy-api.conf.in
> +++ b/doc/api/doxy-api.conf.in
> @@ -9,6 +9,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
> @TOPDIR@/drivers/crypto/scheduler \
> @TOPDIR@/drivers/dma/dpaa2 \
> @TOPDIR@/drivers/event/dlb2 \
> + @TOPDIR@/drivers/mempool/cnxk \
> @TOPDIR@/drivers/mempool/dpaa2 \
> @TOPDIR@/drivers/net/ark \
> @TOPDIR@/drivers/net/bnxt \
> diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
> index 9238765155..b234481ec1 100644
> --- a/drivers/mempool/cnxk/cn10k_hwpool_ops.c
> +++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
> @@ -3,11 +3,14 @@
> */
>
> #include <rte_mempool.h>
> +#include <rte_pmd_cnxk_mempool.h>
>
> #include "roc_api.h"
> #include "cnxk_mempool.h"
>
> -#define CN10K_HWPOOL_MEM_SIZE 128
> +#define CN10K_HWPOOL_MEM_SIZE 128
> +#define CN10K_NPA_IOVA_RANGE_MIN 0x0
> +#define CN10K_NPA_IOVA_RANGE_MAX 0x1fffffffffff80
>
> static int __rte_hot
> cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int n)
> @@ -197,6 +200,64 @@ cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int max_objs,
> return hp->size;
> }
>
> +int
> +rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1, struct rte_mbuf *m2)
> +{
> + struct rte_mempool_objhdr *hdr;
> +
> +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
> + if (!(CNXK_MEMPOOL_FLAGS(m1->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK) ||
> + !(CNXK_MEMPOOL_FLAGS(m2->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)) {
> + plt_err("Pools must have range check disabled");
> + return -EINVAL;
> + }
> + if (m1->pool->elt_size != m2->pool->elt_size ||
> + m1->pool->header_size != m2->pool->header_size ||
> + m1->pool->trailer_size != m2->pool->trailer_size ||
> + m1->pool->size != m2->pool->size) {
> + plt_err("Parameters of pools involved in exchange does not match");
> + return -EINVAL;
> + }
> +#endif
> + RTE_SWAP(m1->pool, m2->pool);
> + hdr = rte_mempool_get_header(m1);
> + hdr->mp = m1->pool;
> + hdr = rte_mempool_get_header(m2);
> + hdr->mp = m2->pool;
> + return 0;
> +}
> +
> +int
> +rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp)
> +{
> + return !!(CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_IS_HWPOOL);
> +}
> +
> +int
> +rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp)
> +{
> + if (rte_pmd_cnxk_mempool_is_hwpool(mp)) {
> + /* Disable only aura range check for hardware pools */
> + roc_npa_aura_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
> + CN10K_NPA_IOVA_RANGE_MAX);
> + CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
> + mp = CNXK_MEMPOOL_CONFIG(mp);
> + }
> +
> + /* No need to disable again if already disabled */
> + if (CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)
> + return 0;
> +
> + /* Disable aura/pool range check */
> + roc_npa_pool_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
> + CN10K_NPA_IOVA_RANGE_MAX);
> + if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
> + return -EBUSY;
> +
> + CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
> + return 0;
> +}
> +
> static struct rte_mempool_ops cn10k_hwpool_ops = {
> .name = "cn10k_hwpool_ops",
> .alloc = cn10k_hwpool_alloc,
> diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
> index 4ca05d53e1..669e617952 100644
> --- a/drivers/mempool/cnxk/cnxk_mempool.h
> +++ b/drivers/mempool/cnxk/cnxk_mempool.h
> @@ -20,6 +20,10 @@ enum cnxk_mempool_flags {
> * This flag is set by the driver.
> */
> CNXK_MEMPOOL_F_IS_HWPOOL = RTE_BIT64(2),
> + /* This flag indicates whether range check has been disabled for
> + * the pool. This flag is set by the driver.
> + */
> + CNXK_MEMPOOL_F_NO_RANGE_CHECK = RTE_BIT64(3),
> };
>
> #define CNXK_MEMPOOL_F_MASK 0xFUL
> diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
> index ce152bedd2..e388cce26a 100644
> --- a/drivers/mempool/cnxk/meson.build
> +++ b/drivers/mempool/cnxk/meson.build
> @@ -17,5 +17,6 @@ sources = files(
> 'cn10k_hwpool_ops.c',
> )
>
> +headers = files('rte_pmd_cnxk_mempool.h')
> deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
> require_iova_in_mbuf = false
> diff --git a/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h b/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
> new file mode 100644
> index 0000000000..ada6e7cd4d
> --- /dev/null
> +++ b/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
> @@ -0,0 +1,56 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2023 Marvell.
> + */
> +
> +/**
> + * @file rte_pmd_cnxk_mempool.h
> + * Marvell CNXK Mempool PMD specific functions.
> + *
> + **/
> +
> +#ifndef _PMD_CNXK_MEMPOOL_H_
> +#define _PMD_CNXK_MEMPOOL_H_
> +
> +#include <rte_mbuf.h>
> +#include <rte_mempool.h>
> +
> +/**
> + * Exchange mbufs between two mempools.
> + *
> + * @param m1
> + * First mbuf
> + * @param m2
> + * Second mbuf
> + *
> + * @return
> + * 0 on success, a negative errno value otherwise.
> + */
> +__rte_experimental
> +int rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1,
> + struct rte_mbuf *m2);
> +
> +/**
> + * Check whether a mempool is a hwpool.
> + *
> + * @param mp
> + * Mempool to check.
> + *
> + * @return
> + * 1 if mp is a hwpool, 0 otherwise.
> + */
> +__rte_experimental
> +int rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp);
> +
> +/**
> + * Disable buffer address range check on a mempool.
> + *
> + * @param mp
> + * Mempool to disable range check on.
> + *
> + * @return
> + * 0 on success, a negative errno value otherwise.
> + */
> +__rte_experimental
> +int rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp);
> +
> +#endif /* _PMD_CNXK_MEMPOOL_H_ */
> diff --git a/drivers/mempool/cnxk/version.map b/drivers/mempool/cnxk/version.map
> new file mode 100644
> index 0000000000..755731e3b5
> --- /dev/null
> +++ b/drivers/mempool/cnxk/version.map
> @@ -0,0 +1,10 @@
> + DPDK_23 {
> + local: *;
> + };
> +
> + EXPERIMENTAL {
> + global:
> + rte_pmd_cnxk_mempool_is_hwpool;
> + rte_pmd_cnxk_mempool_mbuf_exchange;
> + rte_pmd_cnxk_mempool_range_check_disable;
> + };
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags
2023-04-11 7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
` (5 preceding siblings ...)
2023-05-23 9:04 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
@ 2023-05-23 9:13 ` Ashwin Sekhar T K
2023-05-23 9:13 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
` (2 more replies)
2023-05-23 10:54 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
7 siblings, 3 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:13 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh, Nithin Dabilpuram,
Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Use lower bits of pool_config to pass flags specific to
cnxk mempool PMD ops.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/mempool/cnxk/cnxk_mempool.h | 24 ++++++++++++++++++++++++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 17 ++++++++++-------
drivers/net/cnxk/cnxk_ethdev_sec.c | 25 ++++++-------------------
3 files changed, 40 insertions(+), 26 deletions(-)
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index 3405aa7663..fc2e4b5b70 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -7,6 +7,30 @@
#include <rte_mempool.h>
+enum cnxk_mempool_flags {
+ /* This flag is used to ensure that only aura zero is allocated.
+ * If aura zero is not available, then mempool creation fails.
+ */
+ CNXK_MEMPOOL_F_ZERO_AURA = RTE_BIT64(0),
+ /* Here the pool create will use the npa_aura_s structure passed
+ * as pool config to create the pool.
+ */
+ CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+};
+
+#define CNXK_MEMPOOL_F_MASK 0xFUL
+
+#define CNXK_MEMPOOL_FLAGS(_m) \
+ (PLT_U64_CAST((_m)->pool_config) & CNXK_MEMPOOL_F_MASK)
+#define CNXK_MEMPOOL_CONFIG(_m) \
+ (PLT_PTR_CAST(PLT_U64_CAST((_m)->pool_config) & ~CNXK_MEMPOOL_F_MASK))
+#define CNXK_MEMPOOL_SET_FLAGS(_m, _f) \
+ do { \
+ void *_c = CNXK_MEMPOOL_CONFIG(_m); \
+ uint64_t _flags = CNXK_MEMPOOL_FLAGS(_m) | (_f); \
+ (_m)->pool_config = PLT_PTR_CAST(PLT_U64_CAST(_c) | _flags); \
+ } while (0)
+
unsigned int cnxk_mempool_get_count(const struct rte_mempool *mp);
ssize_t cnxk_mempool_calc_mem_size(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 3769afd3d1..1b6c4591bb 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,7 +72,7 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
int
cnxk_mempool_alloc(struct rte_mempool *mp)
{
- uint32_t block_count, flags = 0;
+ uint32_t block_count, flags, roc_flags = 0;
uint64_t aura_handle = 0;
struct npa_aura_s aura;
struct npa_pool_s pool;
@@ -96,15 +96,18 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
pool.nat_align = 1;
pool.buf_offset = mp->header_size / ROC_ALIGN;
- /* Use driver specific mp->pool_config to override aura config */
- if (mp->pool_config != NULL)
- memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
+ flags = CNXK_MEMPOOL_FLAGS(mp);
+ if (flags & CNXK_MEMPOOL_F_ZERO_AURA) {
+ roc_flags = ROC_NPA_ZERO_AURA_F;
+ } else if (flags & CNXK_MEMPOOL_F_CUSTOM_AURA) {
+ struct npa_aura_s *paura;
- if (aura.ena && aura.pool_addr == 0)
- flags = ROC_NPA_ZERO_AURA_F;
+ paura = CNXK_MEMPOOL_CONFIG(mp);
+ memcpy(&aura, paura, sizeof(struct npa_aura_s));
+ }
rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
- &pool, flags);
+ &pool, roc_flags);
if (rc) {
plt_err("Failed to alloc pool or aura rc=%d", rc);
goto error;
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index aa8a378a00..cd64daacc0 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -3,6 +3,7 @@
*/
#include <cnxk_ethdev.h>
+#include <cnxk_mempool.h>
#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
@@ -43,7 +44,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
{
const char *mp_name = NULL;
struct rte_pktmbuf_pool_private mbp_priv;
- struct npa_aura_s *aura;
struct rte_mempool *mp;
uint16_t first_skip;
int rc;
@@ -65,7 +65,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
return -EINVAL;
}
- plt_free(mp->pool_config);
rte_mempool_free(mp);
*aura_handle = 0;
@@ -84,22 +83,12 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
return -EIO;
}
- /* Indicate to allocate zero aura */
- aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
- if (!aura) {
- rc = -ENOMEM;
- goto free_mp;
- }
- aura->ena = 1;
- if (!mempool_name)
- aura->pool_addr = 0;
- else
- aura->pool_addr = 1; /* Any non zero value, so that alloc from next free Index */
-
- rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(), aura);
+ rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+ mempool_name ?
+ NULL : PLT_PTR_CAST(CNXK_MEMPOOL_F_ZERO_AURA));
if (rc) {
plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
- goto free_aura;
+ goto free_mp;
}
/* Init mempool private area */
@@ -113,15 +102,13 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
rc = rte_mempool_populate_default(mp);
if (rc < 0) {
plt_err("Failed to create inline meta pool, rc=%d", rc);
- goto free_aura;
+ goto free_mp;
}
rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
*aura_handle = mp->pool_id;
*mpool = (uintptr_t)mp;
return 0;
-free_aura:
- plt_free(aura);
free_mp:
rte_mempool_free(mp);
return rc;
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs
2023-05-23 9:13 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
@ 2023-05-23 9:13 ` Ashwin Sekhar T K
2023-05-23 9:13 ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
2023-05-23 9:27 ` Ashwin Sekhar T K
2 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:13 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: jerinj, pbhagavatula, psatheesh, asekhar, anoobj, gakhil, hkalra
Add ROC APIs which allows to create NPA auras independently and
attach it to an existing NPA pool. Also add API to destroy
NPA auras independently.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_npa.c | 219 ++++++++++++++++++++++++++++++++
drivers/common/cnxk/roc_npa.h | 4 +
drivers/common/cnxk/version.map | 2 +
3 files changed, 225 insertions(+)
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 20637fbf65..e3c925ddd1 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -85,6 +85,36 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura
return rc;
}
+static int
+npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
+{
+ struct npa_aq_enq_req *aura_init_req;
+ struct npa_aq_enq_rsp *aura_init_rsp;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ mbox = mbox_get(m_box);
+ aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (aura_init_req == NULL)
+ goto exit;
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_INIT;
+ mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+ rc = mbox_process_msg(mbox, (void **)&aura_init_rsp);
+ if (rc < 0)
+ goto exit;
+
+ if (aura_init_rsp->hdr.rc == 0)
+ rc = 0;
+ else
+ rc = NPA_ERR_AURA_POOL_INIT;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
static int
npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
{
@@ -156,6 +186,54 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
return rc;
}
+static int
+npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
+{
+ struct npa_aq_enq_req *aura_req;
+ struct npa_aq_enq_rsp *aura_rsp;
+ struct ndc_sync_op *ndc_req;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ /* Procedure for disabling an aura/pool */
+ plt_delay_us(10);
+
+ mbox = mbox_get(m_box);
+ aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (aura_req == NULL)
+ goto exit;
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+ aura_req->aura.ena = 0;
+ aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+ rc = mbox_process_msg(mbox, (void **)&aura_rsp);
+ if (rc < 0)
+ goto exit;
+
+ if (aura_rsp->hdr.rc != 0)
+ return NPA_ERR_AURA_POOL_FINI;
+
+ /* Sync NDC-NPA for LF */
+ ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+ if (ndc_req == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+ ndc_req->npa_lf_sync = 1;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+ rc = NPA_ERR_AURA_POOL_FINI;
+ goto exit;
+ }
+ rc = 0;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
int
roc_npa_pool_op_pc_reset(uint64_t aura_handle)
{
@@ -493,6 +571,108 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
return rc;
}
+static int
+npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id,
+ struct npa_aura_s *aura, uint64_t *aura_handle, uint32_t flags)
+{
+ int rc, aura_id;
+
+ /* Sanity check */
+ if (!lf || !aura || !aura_handle)
+ return NPA_ERR_PARAM;
+
+ roc_npa_dev_lock();
+ /* Get aura_id from resource bitmap */
+ aura_id = find_free_aura(lf, flags);
+ if (aura_id < 0) {
+ roc_npa_dev_unlock();
+ return NPA_ERR_AURA_ID_ALLOC;
+ }
+
+ /* Mark aura as reserved */
+ plt_bitmap_clear(lf->npa_bmp, aura_id);
+
+ roc_npa_dev_unlock();
+ rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
+ aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
+ NPA_ERR_AURA_ID_ALLOC :
+ 0;
+ if (rc)
+ goto exit;
+
+ /* Update aura fields */
+ aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
+ aura->ena = 1;
+ aura->shift = plt_log2_u32(block_count);
+ aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
+ aura->limit = block_count;
+ aura->pool_caching = 1;
+ aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+ aura->avg_con = 0;
+ /* Many to one reduction */
+ aura->err_qint_idx = aura_id % lf->qints;
+
+ /* Issue AURA_INIT and POOL_INIT op */
+ rc = npa_aura_init(lf->mbox, aura_id, aura);
+ if (rc)
+ return rc;
+
+ *aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);
+
+ return 0;
+
+exit:
+ return rc;
+}
+
+int
+roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
+ struct npa_aura_s *aura, int pool_id, uint32_t flags)
+{
+ struct npa_aura_s defaura;
+ struct idev_cfg *idev;
+ struct npa_lf *lf;
+ int rc;
+
+ lf = idev_npa_obj_get();
+ if (lf == NULL) {
+ rc = NPA_ERR_DEVICE_NOT_BOUNDED;
+ goto error;
+ }
+
+ idev = idev_get_cfg();
+ if (idev == NULL) {
+ rc = NPA_ERR_ALLOC;
+ goto error;
+ }
+
+ if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
+ rc = NPA_ERR_ALLOC;
+ goto error;
+ }
+
+ if (aura == NULL) {
+ memset(&defaura, 0, sizeof(struct npa_aura_s));
+ aura = &defaura;
+ }
+
+ rc = npa_aura_alloc(lf, block_count, pool_id, aura, aura_handle, flags);
+ if (rc) {
+ plt_err("Failed to alloc aura rc=%d", rc);
+ goto error;
+ }
+
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, *aura_handle);
+
+ /* Just hold the reference of the object */
+ __atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
+error:
+ return rc;
+}
+
int
roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
{
@@ -561,6 +741,45 @@ roc_npa_pool_destroy(uint64_t aura_handle)
return rc;
}
+static int
+npa_aura_free(struct npa_lf *lf, uint64_t aura_handle)
+{
+ int aura_id, rc;
+
+ if (!lf || !aura_handle)
+ return NPA_ERR_PARAM;
+
+ aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ rc = npa_aura_fini(lf->mbox, aura_id);
+
+ if (rc)
+ return rc;
+
+ memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));
+
+ roc_npa_dev_lock();
+ plt_bitmap_set(lf->npa_bmp, aura_id);
+ roc_npa_dev_unlock();
+
+ return rc;
+}
+
+int
+roc_npa_aura_destroy(uint64_t aura_handle)
+{
+ struct npa_lf *lf = idev_npa_obj_get();
+ int rc = 0;
+
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
+ rc = npa_aura_free(lf, aura_handle);
+ if (rc)
+ plt_err("Failed to destroy aura rc=%d", rc);
+
+ /* Release the reference of npa */
+ rc |= npa_lf_fini();
+ return rc;
+}
+
int
roc_npa_pool_range_update_check(uint64_t aura_handle)
{
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index dd588b0322..df15dabe92 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,10 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
+ struct npa_aura_s *aura, int pool_id,
+ uint32_t flags);
+int __roc_api roc_npa_aura_destroy(uint64_t aura_handle);
uint64_t __roc_api roc_npa_zero_aura_handle(void);
int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index b298a21b84..9414b55e9c 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -347,6 +347,8 @@ INTERNAL {
roc_nix_vlan_mcam_entry_write;
roc_nix_vlan_strip_vtag_ena_dis;
roc_nix_vlan_tpid_set;
+ roc_npa_aura_create;
+ roc_npa_aura_destroy;
roc_npa_buf_type_mask;
roc_npa_buf_type_limit_get;
roc_npa_buf_type_update;
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs
2023-05-23 9:13 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23 9:13 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
@ 2023-05-23 9:13 ` Ashwin Sekhar T K
2023-05-23 9:27 ` Ashwin Sekhar T K
2 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:13 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Current APIs to set range on auras modifies both the
aura range limits in software and pool range limits
in NPA hardware.
Newly added ROC APIs allow to set/get aura range limits
in software alone without modifying hardware.
The existing aura range set functionality has been moved
as a pool range set API.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_nix_queue.c | 2 +-
drivers/common/cnxk/roc_npa.c | 35 ++++++++++++++++++++++++-
drivers/common/cnxk/roc_npa.h | 6 +++++
drivers/common/cnxk/roc_sso.c | 2 +-
drivers/common/cnxk/version.map | 2 ++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 2 +-
6 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 21bfe7d498..ac4d9856c1 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1050,7 +1050,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
goto npa_fail;
}
- roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
+ roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
sq->aura_sqb_bufs = nb_sqb_bufs;
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index e3c925ddd1..3b0f95a304 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -18,7 +18,7 @@ roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
}
void
-roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova,
uint64_t end_iova)
{
const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
@@ -32,6 +32,7 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
PLT_ASSERT(lf);
lim = lf->aura_lim;
+ /* Change the range bookkeeping in software as well as in hardware */
lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
@@ -39,6 +40,38 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
roc_store_pair(lim[reg].ptr_end, reg, end);
}
+void
+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+ uint64_t end_iova)
+{
+ uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aura_lim *lim;
+
+ PLT_ASSERT(lf);
+ lim = lf->aura_lim;
+
+ /* Change only the bookkeeping in software */
+ lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
+ lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
+}
+
+void
+roc_npa_aura_op_range_get(uint64_t aura_handle, uint64_t *start_iova,
+ uint64_t *end_iova)
+{
+ uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_aura_lim *lim;
+ struct npa_lf *lf;
+
+ lf = idev_npa_obj_get();
+ PLT_ASSERT(lf);
+
+ lim = lf->aura_lim;
+ *start_iova = lim[aura_id].ptr_start;
+ *end_iova = lim[aura_id].ptr_end;
+}
+
static int
npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura,
struct npa_pool_s *pool)
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index df15dabe92..21608a40d9 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,12 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+void __roc_api roc_npa_aura_op_range_get(uint64_t aura_handle,
+ uint64_t *start_iova,
+ uint64_t *end_iova);
+void __roc_api roc_npa_pool_op_range_set(uint64_t aura_handle,
+ uint64_t start_iova,
+ uint64_t end_iova);
int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
struct npa_aura_s *aura, int pool_id,
uint32_t flags);
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 4a6a5080f7..c376bd837f 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -523,7 +523,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
iova += xaq_buf_size;
}
- roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+ roc_npa_pool_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
xaq->nb_xaq) {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 9414b55e9c..5281c71550 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -354,6 +354,7 @@ INTERNAL {
roc_npa_buf_type_update;
roc_npa_aura_drop_set;
roc_npa_aura_limit_modify;
+ roc_npa_aura_op_range_get;
roc_npa_aura_op_range_set;
roc_npa_ctx_dump;
roc_npa_dev_fini;
@@ -365,6 +366,7 @@ INTERNAL {
roc_npa_pool_create;
roc_npa_pool_destroy;
roc_npa_pool_op_pc_reset;
+ roc_npa_pool_op_range_set;
roc_npa_pool_range_update_check;
roc_npa_zero_aura_handle;
roc_npc_fini;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 1b6c4591bb..a1aeaee746 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -174,7 +174,7 @@ cnxk_mempool_populate(struct rte_mempool *mp, unsigned int max_objs,
plt_npa_dbg("requested objects %" PRIu64 ", possible objects %" PRIu64
"", (uint64_t)max_objs, (uint64_t)num_elts);
- roc_npa_aura_op_range_set(mp->pool_id, iova,
+ roc_npa_pool_op_range_set(mp->pool_id, iova,
iova + num_elts * total_elt_sz);
if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs
2023-05-23 9:13 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23 9:13 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
2023-05-23 9:13 ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
@ 2023-05-23 9:27 ` Ashwin Sekhar T K
2 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 9:27 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Current APIs to set range on auras modify both the
aura range limits in software and pool range limits
in NPA hardware.
Newly added ROC APIs allow setting/getting aura range limits
in software alone without modifying hardware.
The existing aura range set functionality has been moved
as a pool range set API.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_nix_queue.c | 2 +-
drivers/common/cnxk/roc_npa.c | 35 ++++++++++++++++++++++++-
drivers/common/cnxk/roc_npa.h | 6 +++++
drivers/common/cnxk/roc_sso.c | 2 +-
drivers/common/cnxk/version.map | 2 ++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 2 +-
6 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 21bfe7d498..ac4d9856c1 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1050,7 +1050,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
goto npa_fail;
}
- roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
+ roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
sq->aura_sqb_bufs = nb_sqb_bufs;
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index e3c925ddd1..3b0f95a304 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -18,7 +18,7 @@ roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
}
void
-roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova,
uint64_t end_iova)
{
const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
@@ -32,6 +32,7 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
PLT_ASSERT(lf);
lim = lf->aura_lim;
+ /* Change the range bookkeeping in software as well as in hardware */
lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
@@ -39,6 +40,38 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
roc_store_pair(lim[reg].ptr_end, reg, end);
}
+void
+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+ uint64_t end_iova)
+{
+ uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aura_lim *lim;
+
+ PLT_ASSERT(lf);
+ lim = lf->aura_lim;
+
+ /* Change only the bookkeeping in software */
+ lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
+ lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
+}
+
+void
+roc_npa_aura_op_range_get(uint64_t aura_handle, uint64_t *start_iova,
+ uint64_t *end_iova)
+{
+ uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_aura_lim *lim;
+ struct npa_lf *lf;
+
+ lf = idev_npa_obj_get();
+ PLT_ASSERT(lf);
+
+ lim = lf->aura_lim;
+ *start_iova = lim[aura_id].ptr_start;
+ *end_iova = lim[aura_id].ptr_end;
+}
+
static int
npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura,
struct npa_pool_s *pool)
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index df15dabe92..21608a40d9 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,12 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+void __roc_api roc_npa_aura_op_range_get(uint64_t aura_handle,
+ uint64_t *start_iova,
+ uint64_t *end_iova);
+void __roc_api roc_npa_pool_op_range_set(uint64_t aura_handle,
+ uint64_t start_iova,
+ uint64_t end_iova);
int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
struct npa_aura_s *aura, int pool_id,
uint32_t flags);
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 4a6a5080f7..c376bd837f 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -523,7 +523,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
iova += xaq_buf_size;
}
- roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+ roc_npa_pool_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
xaq->nb_xaq) {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 9414b55e9c..5281c71550 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -354,6 +354,7 @@ INTERNAL {
roc_npa_buf_type_update;
roc_npa_aura_drop_set;
roc_npa_aura_limit_modify;
+ roc_npa_aura_op_range_get;
roc_npa_aura_op_range_set;
roc_npa_ctx_dump;
roc_npa_dev_fini;
@@ -365,6 +366,7 @@ INTERNAL {
roc_npa_pool_create;
roc_npa_pool_destroy;
roc_npa_pool_op_pc_reset;
+ roc_npa_pool_op_range_set;
roc_npa_pool_range_update_check;
roc_npa_zero_aura_handle;
roc_npc_fini;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 1b6c4591bb..a1aeaee746 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -174,7 +174,7 @@ cnxk_mempool_populate(struct rte_mempool *mp, unsigned int max_objs,
plt_npa_dbg("requested objects %" PRIu64 ", possible objects %" PRIu64
"", (uint64_t)max_objs, (uint64_t)num_elts);
- roc_npa_aura_op_range_set(mp->pool_id, iova,
+ roc_npa_pool_op_range_set(mp->pool_id, iova,
iova + num_elts * total_elt_sz);
if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags
2023-04-11 7:55 [PATCH 0/5] add hwpools and support exchanging mbufs between pools Ashwin Sekhar T K
` (6 preceding siblings ...)
2023-05-23 9:13 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
@ 2023-05-23 10:54 ` Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
` (3 more replies)
7 siblings, 4 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 10:54 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh, Nithin Dabilpuram,
Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Use lower bits of pool_config to pass flags specific to
cnxk mempool PMD ops.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/mempool/cnxk/cnxk_mempool.h | 24 ++++++++++++++++++++++++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 17 ++++++++++-------
drivers/net/cnxk/cnxk_ethdev_sec.c | 25 ++++++-------------------
3 files changed, 40 insertions(+), 26 deletions(-)
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index 3405aa7663..fc2e4b5b70 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -7,6 +7,30 @@
#include <rte_mempool.h>
+enum cnxk_mempool_flags {
+ /* This flag is used to ensure that only aura zero is allocated.
+ * If aura zero is not available, then mempool creation fails.
+ */
+ CNXK_MEMPOOL_F_ZERO_AURA = RTE_BIT64(0),
+ /* Here the pool create will use the npa_aura_s structure passed
+ * as pool config to create the pool.
+ */
+ CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+};
+
+#define CNXK_MEMPOOL_F_MASK 0xFUL
+
+#define CNXK_MEMPOOL_FLAGS(_m) \
+ (PLT_U64_CAST((_m)->pool_config) & CNXK_MEMPOOL_F_MASK)
+#define CNXK_MEMPOOL_CONFIG(_m) \
+ (PLT_PTR_CAST(PLT_U64_CAST((_m)->pool_config) & ~CNXK_MEMPOOL_F_MASK))
+#define CNXK_MEMPOOL_SET_FLAGS(_m, _f) \
+ do { \
+ void *_c = CNXK_MEMPOOL_CONFIG(_m); \
+ uint64_t _flags = CNXK_MEMPOOL_FLAGS(_m) | (_f); \
+ (_m)->pool_config = PLT_PTR_CAST(PLT_U64_CAST(_c) | _flags); \
+ } while (0)
+
unsigned int cnxk_mempool_get_count(const struct rte_mempool *mp);
ssize_t cnxk_mempool_calc_mem_size(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 3769afd3d1..1b6c4591bb 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,7 +72,7 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
int
cnxk_mempool_alloc(struct rte_mempool *mp)
{
- uint32_t block_count, flags = 0;
+ uint32_t block_count, flags, roc_flags = 0;
uint64_t aura_handle = 0;
struct npa_aura_s aura;
struct npa_pool_s pool;
@@ -96,15 +96,18 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
pool.nat_align = 1;
pool.buf_offset = mp->header_size / ROC_ALIGN;
- /* Use driver specific mp->pool_config to override aura config */
- if (mp->pool_config != NULL)
- memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
+ flags = CNXK_MEMPOOL_FLAGS(mp);
+ if (flags & CNXK_MEMPOOL_F_ZERO_AURA) {
+ roc_flags = ROC_NPA_ZERO_AURA_F;
+ } else if (flags & CNXK_MEMPOOL_F_CUSTOM_AURA) {
+ struct npa_aura_s *paura;
- if (aura.ena && aura.pool_addr == 0)
- flags = ROC_NPA_ZERO_AURA_F;
+ paura = CNXK_MEMPOOL_CONFIG(mp);
+ memcpy(&aura, paura, sizeof(struct npa_aura_s));
+ }
rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
- &pool, flags);
+ &pool, roc_flags);
if (rc) {
plt_err("Failed to alloc pool or aura rc=%d", rc);
goto error;
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index aa8a378a00..cd64daacc0 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -3,6 +3,7 @@
*/
#include <cnxk_ethdev.h>
+#include <cnxk_mempool.h>
#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
@@ -43,7 +44,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
{
const char *mp_name = NULL;
struct rte_pktmbuf_pool_private mbp_priv;
- struct npa_aura_s *aura;
struct rte_mempool *mp;
uint16_t first_skip;
int rc;
@@ -65,7 +65,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
return -EINVAL;
}
- plt_free(mp->pool_config);
rte_mempool_free(mp);
*aura_handle = 0;
@@ -84,22 +83,12 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
return -EIO;
}
- /* Indicate to allocate zero aura */
- aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
- if (!aura) {
- rc = -ENOMEM;
- goto free_mp;
- }
- aura->ena = 1;
- if (!mempool_name)
- aura->pool_addr = 0;
- else
- aura->pool_addr = 1; /* Any non zero value, so that alloc from next free Index */
-
- rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(), aura);
+ rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+ mempool_name ?
+ NULL : PLT_PTR_CAST(CNXK_MEMPOOL_F_ZERO_AURA));
if (rc) {
plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
- goto free_aura;
+ goto free_mp;
}
/* Init mempool private area */
@@ -113,15 +102,13 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_
rc = rte_mempool_populate_default(mp);
if (rc < 0) {
plt_err("Failed to create inline meta pool, rc=%d", rc);
- goto free_aura;
+ goto free_mp;
}
rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
*aura_handle = mp->pool_id;
*mpool = (uintptr_t)mp;
return 0;
-free_aura:
- plt_free(aura);
free_mp:
rte_mempool_free(mp);
return rc;
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs
2023-05-23 10:54 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
@ 2023-05-23 10:54 ` Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
` (2 subsequent siblings)
3 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 10:54 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: jerinj, pbhagavatula, psatheesh, asekhar, anoobj, gakhil, hkalra
Add ROC APIs which allow creating NPA auras independently and
attaching them to an existing NPA pool. Also add an API to destroy
NPA auras independently.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_npa.c | 219 ++++++++++++++++++++++++++++++++
drivers/common/cnxk/roc_npa.h | 4 +
drivers/common/cnxk/version.map | 2 +
3 files changed, 225 insertions(+)
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 20637fbf65..e3c925ddd1 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -85,6 +85,36 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura
return rc;
}
+static int
+npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
+{
+ struct npa_aq_enq_req *aura_init_req;
+ struct npa_aq_enq_rsp *aura_init_rsp;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ mbox = mbox_get(m_box);
+ aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (aura_init_req == NULL)
+ goto exit;
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_INIT;
+ mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+ rc = mbox_process_msg(mbox, (void **)&aura_init_rsp);
+ if (rc < 0)
+ goto exit;
+
+ if (aura_init_rsp->hdr.rc == 0)
+ rc = 0;
+ else
+ rc = NPA_ERR_AURA_POOL_INIT;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
static int
npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
{
@@ -156,6 +186,54 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
return rc;
}
+static int
+npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
+{
+ struct npa_aq_enq_req *aura_req;
+ struct npa_aq_enq_rsp *aura_rsp;
+ struct ndc_sync_op *ndc_req;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+
+ /* Procedure for disabling an aura/pool */
+ plt_delay_us(10);
+
+ mbox = mbox_get(m_box);
+ aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+ if (aura_req == NULL)
+ goto exit;
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+ aura_req->aura.ena = 0;
+ aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+ rc = mbox_process_msg(mbox, (void **)&aura_rsp);
+ if (rc < 0)
+ goto exit;
+
+ if (aura_rsp->hdr.rc != 0)
+ return NPA_ERR_AURA_POOL_FINI;
+
+ /* Sync NDC-NPA for LF */
+ ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+ if (ndc_req == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+ ndc_req->npa_lf_sync = 1;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+ rc = NPA_ERR_AURA_POOL_FINI;
+ goto exit;
+ }
+ rc = 0;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
int
roc_npa_pool_op_pc_reset(uint64_t aura_handle)
{
@@ -493,6 +571,108 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
return rc;
}
+static int
+npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id,
+ struct npa_aura_s *aura, uint64_t *aura_handle, uint32_t flags)
+{
+ int rc, aura_id;
+
+ /* Sanity check */
+ if (!lf || !aura || !aura_handle)
+ return NPA_ERR_PARAM;
+
+ roc_npa_dev_lock();
+ /* Get aura_id from resource bitmap */
+ aura_id = find_free_aura(lf, flags);
+ if (aura_id < 0) {
+ roc_npa_dev_unlock();
+ return NPA_ERR_AURA_ID_ALLOC;
+ }
+
+ /* Mark aura as reserved */
+ plt_bitmap_clear(lf->npa_bmp, aura_id);
+
+ roc_npa_dev_unlock();
+ rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
+ aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
+ NPA_ERR_AURA_ID_ALLOC :
+ 0;
+ if (rc)
+ goto exit;
+
+ /* Update aura fields */
+ aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
+ aura->ena = 1;
+ aura->shift = plt_log2_u32(block_count);
+ aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
+ aura->limit = block_count;
+ aura->pool_caching = 1;
+ aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+ aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+ aura->avg_con = 0;
+ /* Many to one reduction */
+ aura->err_qint_idx = aura_id % lf->qints;
+
+ /* Issue AURA_INIT and POOL_INIT op */
+ rc = npa_aura_init(lf->mbox, aura_id, aura);
+ if (rc)
+ return rc;
+
+ *aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);
+
+ return 0;
+
+exit:
+ return rc;
+}
+
+int
+roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
+ struct npa_aura_s *aura, int pool_id, uint32_t flags)
+{
+ struct npa_aura_s defaura;
+ struct idev_cfg *idev;
+ struct npa_lf *lf;
+ int rc;
+
+ lf = idev_npa_obj_get();
+ if (lf == NULL) {
+ rc = NPA_ERR_DEVICE_NOT_BOUNDED;
+ goto error;
+ }
+
+ idev = idev_get_cfg();
+ if (idev == NULL) {
+ rc = NPA_ERR_ALLOC;
+ goto error;
+ }
+
+ if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
+ rc = NPA_ERR_ALLOC;
+ goto error;
+ }
+
+ if (aura == NULL) {
+ memset(&defaura, 0, sizeof(struct npa_aura_s));
+ aura = &defaura;
+ }
+
+ rc = npa_aura_alloc(lf, block_count, pool_id, aura, aura_handle, flags);
+ if (rc) {
+ plt_err("Failed to alloc aura rc=%d", rc);
+ goto error;
+ }
+
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, *aura_handle);
+
+ /* Just hold the reference of the object */
+ __atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
+error:
+ return rc;
+}
+
int
roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
{
@@ -561,6 +741,45 @@ roc_npa_pool_destroy(uint64_t aura_handle)
return rc;
}
+static int
+npa_aura_free(struct npa_lf *lf, uint64_t aura_handle)
+{
+ int aura_id, rc;
+
+ if (!lf || !aura_handle)
+ return NPA_ERR_PARAM;
+
+ aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ rc = npa_aura_fini(lf->mbox, aura_id);
+
+ if (rc)
+ return rc;
+
+ memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));
+
+ roc_npa_dev_lock();
+ plt_bitmap_set(lf->npa_bmp, aura_id);
+ roc_npa_dev_unlock();
+
+ return rc;
+}
+
+int
+roc_npa_aura_destroy(uint64_t aura_handle)
+{
+ struct npa_lf *lf = idev_npa_obj_get();
+ int rc = 0;
+
+ plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
+ rc = npa_aura_free(lf, aura_handle);
+ if (rc)
+ plt_err("Failed to destroy aura rc=%d", rc);
+
+ /* Release the reference of npa */
+ rc |= npa_lf_fini();
+ return rc;
+}
+
int
roc_npa_pool_range_update_check(uint64_t aura_handle)
{
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index dd588b0322..df15dabe92 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,10 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
+ struct npa_aura_s *aura, int pool_id,
+ uint32_t flags);
+int __roc_api roc_npa_aura_destroy(uint64_t aura_handle);
uint64_t __roc_api roc_npa_zero_aura_handle(void);
int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index b298a21b84..9414b55e9c 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -347,6 +347,8 @@ INTERNAL {
roc_nix_vlan_mcam_entry_write;
roc_nix_vlan_strip_vtag_ena_dis;
roc_nix_vlan_tpid_set;
+ roc_npa_aura_create;
+ roc_npa_aura_destroy;
roc_npa_buf_type_mask;
roc_npa_buf_type_limit_get;
roc_npa_buf_type_update;
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs
2023-05-23 10:54 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
@ 2023-05-23 10:54 ` Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 4/5] mempool/cnxk: add hwpool ops Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
3 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 10:54 UTC (permalink / raw)
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, psatheesh, anoobj, gakhil, hkalra
Current APIs to set range on auras modify both the
aura range limits in software and pool range limits
in NPA hardware.
Newly added ROC APIs allow setting/getting aura range limits
in software alone without modifying hardware.
The existing aura range set functionality has been moved
as a pool range set API.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/common/cnxk/roc_nix_queue.c | 2 +-
drivers/common/cnxk/roc_npa.c | 35 ++++++++++++++++++++++++-
drivers/common/cnxk/roc_npa.h | 6 +++++
drivers/common/cnxk/roc_sso.c | 2 +-
drivers/common/cnxk/version.map | 2 ++
drivers/mempool/cnxk/cnxk_mempool_ops.c | 2 +-
6 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 21bfe7d498..ac4d9856c1 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1050,7 +1050,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
goto npa_fail;
}
- roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
+ roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
sq->aura_sqb_bufs = nb_sqb_bufs;
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index e3c925ddd1..3b0f95a304 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -18,7 +18,7 @@ roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
}
void
-roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova,
uint64_t end_iova)
{
const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
@@ -32,6 +32,7 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
PLT_ASSERT(lf);
lim = lf->aura_lim;
+ /* Change the range bookkeeping in software as well as in hardware */
lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
@@ -39,6 +40,38 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
roc_store_pair(lim[reg].ptr_end, reg, end);
}
+void
+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+ uint64_t end_iova)
+{
+ uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_lf *lf = idev_npa_obj_get();
+ struct npa_aura_lim *lim;
+
+ PLT_ASSERT(lf);
+ lim = lf->aura_lim;
+
+ /* Change only the bookkeeping in software */
+ lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
+ lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
+}
+
+void
+roc_npa_aura_op_range_get(uint64_t aura_handle, uint64_t *start_iova,
+ uint64_t *end_iova)
+{
+ uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+ struct npa_aura_lim *lim;
+ struct npa_lf *lf;
+
+ lf = idev_npa_obj_get();
+ PLT_ASSERT(lf);
+
+ lim = lf->aura_lim;
+ *start_iova = lim[aura_id].ptr_start;
+ *end_iova = lim[aura_id].ptr_end;
+}
+
static int
npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura,
struct npa_pool_s *pool)
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index df15dabe92..21608a40d9 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,12 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
+void __roc_api roc_npa_aura_op_range_get(uint64_t aura_handle,
+ uint64_t *start_iova,
+ uint64_t *end_iova);
+void __roc_api roc_npa_pool_op_range_set(uint64_t aura_handle,
+ uint64_t start_iova,
+ uint64_t end_iova);
int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
struct npa_aura_s *aura, int pool_id,
uint32_t flags);
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 4a6a5080f7..c376bd837f 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -523,7 +523,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
iova += xaq_buf_size;
}
- roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+ roc_npa_pool_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
if (roc_npa_aura_op_available_wait(xaq->aura_handle, xaq->nb_xaq, 0) !=
xaq->nb_xaq) {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 9414b55e9c..5281c71550 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -354,6 +354,7 @@ INTERNAL {
roc_npa_buf_type_update;
roc_npa_aura_drop_set;
roc_npa_aura_limit_modify;
+ roc_npa_aura_op_range_get;
roc_npa_aura_op_range_set;
roc_npa_ctx_dump;
roc_npa_dev_fini;
@@ -365,6 +366,7 @@ INTERNAL {
roc_npa_pool_create;
roc_npa_pool_destroy;
roc_npa_pool_op_pc_reset;
+ roc_npa_pool_op_range_set;
roc_npa_pool_range_update_check;
roc_npa_zero_aura_handle;
roc_npc_fini;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 1b6c4591bb..a1aeaee746 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -174,7 +174,7 @@ cnxk_mempool_populate(struct rte_mempool *mp, unsigned int max_objs,
plt_npa_dbg("requested objects %" PRIu64 ", possible objects %" PRIu64
"", (uint64_t)max_objs, (uint64_t)num_elts);
- roc_npa_aura_op_range_set(mp->pool_id, iova,
+ roc_npa_pool_op_range_set(mp->pool_id, iova,
iova + num_elts * total_elt_sz);
if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 4/5] mempool/cnxk: add hwpool ops
2023-05-23 10:54 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs Ashwin Sekhar T K
@ 2023-05-23 10:54 ` Ashwin Sekhar T K
2023-05-23 10:54 ` [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools Ashwin Sekhar T K
3 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 10:54 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, skori, skoteshwar, kirankumark, psatheesh, anoobj,
gakhil, hkalra, ndabilpuram
Add hwpool ops which can be used to create a rte_mempool that attaches
to another rte_mempool. The hwpool will not have its own buffers and
will have a dummy populate callback. Only an NPA aura will be allocated
for this rte_mempool. The buffers will be allocated from the NPA pool
of the attached rte_mempool.
Only mbuf objects are supported in hwpool. Generic objects are not
supported. Note that this pool will not have any range check enabled.
So user will be able to free any pointer into this pool. HW will not
throw error interrupts if invalid buffers are passed. So user must be
careful when using this pool.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
drivers/mempool/cnxk/cn10k_hwpool_ops.c | 211 ++++++++++++++++++++++++
drivers/mempool/cnxk/cnxk_mempool.h | 4 +
drivers/mempool/cnxk/meson.build | 1 +
3 files changed, 216 insertions(+)
create mode 100644 drivers/mempool/cnxk/cn10k_hwpool_ops.c
diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
new file mode 100644
index 0000000000..9238765155
--- /dev/null
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include <rte_mempool.h>
+
+#include "roc_api.h"
+#include "cnxk_mempool.h"
+
+#define CN10K_HWPOOL_MEM_SIZE 128
+
+static int __rte_hot
+cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int n)
+{
+ struct rte_mempool *mp;
+ unsigned int index;
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ /* Ensure mbuf init changes are written before the free pointers
+ * are enqueued to the stack.
+ */
+ rte_io_wmb();
+ for (index = 0; index < n; index++) {
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mbuf *m;
+
+ m = PLT_PTR_CAST(obj_table[index]);
+ /* Update mempool information in the mbuf */
+ hdr = rte_mempool_get_header(obj_table[index]);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (hdr->mp != m->pool || hdr->mp != hp)
+ plt_err("Pool Header Mismatch");
+#endif
+ m->pool = mp;
+ hdr->mp = mp;
+ roc_npa_aura_op_free(hp->pool_id, 0,
+ (uint64_t)obj_table[index]);
+ }
+
+ return 0;
+}
+
+static int __rte_hot
+cn10k_hwpool_deq(struct rte_mempool *hp, void **obj_table, unsigned int n)
+{
+ unsigned int index;
+ uint64_t obj;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool *mp;
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+#endif
+
+ for (index = 0; index < n; index++, obj_table++) {
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mbuf *m;
+ int retry = 4;
+
+ /* Retry few times before failing */
+ do {
+ obj = roc_npa_aura_op_alloc(hp->pool_id, 0);
+ } while (retry-- && (obj == 0));
+
+ if (obj == 0) {
+ cn10k_hwpool_enq(hp, obj_table - index, index);
+ return -ENOENT;
+ }
+ /* Update mempool information in the mbuf */
+ hdr = rte_mempool_get_header(PLT_PTR_CAST(obj));
+ m = PLT_PTR_CAST(obj);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (hdr->mp != m->pool || hdr->mp != mp)
+ plt_err("Pool Header Mismatch");
+#endif
+ m->pool = hp;
+ hdr->mp = hp;
+ *obj_table = (void *)obj;
+ }
+
+ return 0;
+}
+
+static unsigned int
+cn10k_hwpool_get_count(const struct rte_mempool *hp)
+{
+ return (unsigned int)roc_npa_aura_op_available(hp->pool_id);
+}
+
+static int
+cn10k_hwpool_alloc(struct rte_mempool *hp)
+{
+ uint64_t aura_handle = 0;
+ struct rte_mempool *mp;
+ uint32_t pool_id;
+ int rc;
+
+ if (hp->cache_size) {
+ plt_err("Hwpool does not support cache");
+ return -EINVAL;
+ }
+
+ if (CNXK_MEMPOOL_FLAGS(hp)) {
+ plt_err("Flags must not be passed to hwpool ops");
+ return -EINVAL;
+ }
+
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ if (!mp) {
+ plt_err("Invalid rte_mempool passed as pool_config");
+ return -EINVAL;
+ }
+ if (mp->cache_size) {
+ plt_err("Hwpool does not support attaching to pool with cache");
+ return -EINVAL;
+ }
+
+ if (hp->elt_size != mp->elt_size ||
+ hp->header_size != mp->header_size ||
+ hp->trailer_size != mp->trailer_size || hp->size != mp->size) {
+ plt_err("Hwpool parameters matching with master pool");
+ return -EINVAL;
+ }
+
+ /* Create the NPA aura */
+ pool_id = roc_npa_aura_handle_to_aura(mp->pool_id);
+ rc = roc_npa_aura_create(&aura_handle, hp->size, NULL, (int)pool_id, 0);
+ if (rc) {
+ plt_err("Failed to create aura rc=%d", rc);
+ return rc;
+ }
+
+ /* Set the flags for the hardware pool */
+ CNXK_MEMPOOL_SET_FLAGS(hp, CNXK_MEMPOOL_F_IS_HWPOOL);
+ hp->pool_id = aura_handle;
+ plt_npa_dbg("aura_handle=0x%" PRIx64, aura_handle);
+
+ return 0;
+}
+
+static void
+cn10k_hwpool_free(struct rte_mempool *hp)
+{
+ int rc = 0;
+
+ plt_npa_dbg("aura_handle=0x%" PRIx64, hp->pool_id);
+ /* It can happen that rte_mempool_free() is called immediately after
+ * rte_mempool_create_empty(). In such cases the NPA pool will not be
+ * allocated.
+ */
+ if (roc_npa_aura_handle_to_base(hp->pool_id) == 0)
+ return;
+
+ rc = roc_npa_aura_destroy(hp->pool_id);
+ if (rc)
+ plt_err("Failed to destroy aura rc=%d", rc);
+}
+
+static ssize_t
+cn10k_hwpool_calc_mem_size(const struct rte_mempool *hp, uint32_t obj_num,
+ uint32_t pg_shift, size_t *min_chunk_size,
+ size_t *align)
+{
+ RTE_SET_USED(hp);
+ RTE_SET_USED(obj_num);
+ RTE_SET_USED(pg_shift);
+ *min_chunk_size = CN10K_HWPOOL_MEM_SIZE;
+ *align = CN10K_HWPOOL_MEM_SIZE;
+ /* Return a minimum mem size so that hwpool can also be initialized just
+ * like a regular pool. This memzone will not be used anywhere.
+ */
+ return CN10K_HWPOOL_MEM_SIZE;
+}
+
+static int
+cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ uint64_t start_iova, end_iova;
+ struct rte_mempool *mp;
+
+ RTE_SET_USED(max_objs);
+ RTE_SET_USED(vaddr);
+ RTE_SET_USED(iova);
+ RTE_SET_USED(len);
+ RTE_SET_USED(obj_cb);
+ RTE_SET_USED(obj_cb_arg);
+ /* HW pools does not require populating anything as these pools are
+ * only associated with NPA aura. The NPA pool being used is that of
+ * another rte_mempool. Only copy the iova range from the aura of
+ * the other rte_mempool to this pool's aura.
+ */
+ mp = CNXK_MEMPOOL_CONFIG(hp);
+ roc_npa_aura_op_range_get(mp->pool_id, &start_iova, &end_iova);
+ roc_npa_aura_op_range_set(hp->pool_id, start_iova, end_iova);
+
+ return hp->size;
+}
+
+static struct rte_mempool_ops cn10k_hwpool_ops = {
+ .name = "cn10k_hwpool_ops",
+ .alloc = cn10k_hwpool_alloc,
+ .free = cn10k_hwpool_free,
+ .enqueue = cn10k_hwpool_enq,
+ .dequeue = cn10k_hwpool_deq,
+ .get_count = cn10k_hwpool_get_count,
+ .calc_mem_size = cn10k_hwpool_calc_mem_size,
+ .populate = cn10k_hwpool_populate,
+};
+
+RTE_MEMPOOL_REGISTER_OPS(cn10k_hwpool_ops);
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index fc2e4b5b70..4ca05d53e1 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -16,6 +16,10 @@ enum cnxk_mempool_flags {
* as pool config to create the pool.
*/
CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+ /* This flag indicates whether the pool is a hardware pool or not.
+ * This flag is set by the driver.
+ */
+ CNXK_MEMPOOL_F_IS_HWPOOL = RTE_BIT64(2),
};
#define CNXK_MEMPOOL_F_MASK 0xFUL
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
index 50856ecde8..ce152bedd2 100644
--- a/drivers/mempool/cnxk/meson.build
+++ b/drivers/mempool/cnxk/meson.build
@@ -14,6 +14,7 @@ sources = files(
'cnxk_mempool_telemetry.c',
'cn9k_mempool_ops.c',
'cn10k_mempool_ops.c',
+ 'cn10k_hwpool_ops.c',
)
deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools
2023-05-23 10:54 ` [PATCH v2 1/5] mempool/cnxk: use pool config to pass flags Ashwin Sekhar T K
` (2 preceding siblings ...)
2023-05-23 10:54 ` [PATCH v2 4/5] mempool/cnxk: add hwpool ops Ashwin Sekhar T K
@ 2023-05-23 10:54 ` Ashwin Sekhar T K
3 siblings, 0 replies; 22+ messages in thread
From: Ashwin Sekhar T K @ 2023-05-23 10:54 UTC (permalink / raw)
To: dev, Ashwin Sekhar T K, Pavan Nikhilesh
Cc: jerinj, skori, skoteshwar, kirankumark, psatheesh, anoobj,
gakhil, hkalra, ndabilpuram
Add the following cnxk mempool PMD APIs to facilitate exchanging mbufs
between pools.
* rte_pmd_cnxk_mempool_is_hwpool() - Allows user to check whether a pool
is hwpool or not.
* rte_pmd_cnxk_mempool_range_check_disable() - Disables range checking on
any rte_mempool.
* rte_pmd_cnxk_mempool_mbuf_exchange() - Exchanges mbufs between any two
rte_mempool where the range check is disabled.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
doc/api/doxy-api-index.md | 1 +
doc/api/doxy-api.conf.in | 1 +
drivers/mempool/cnxk/cn10k_hwpool_ops.c | 63 ++++++++++++++++++++-
drivers/mempool/cnxk/cnxk_mempool.h | 4 ++
drivers/mempool/cnxk/meson.build | 1 +
drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h | 56 ++++++++++++++++++
drivers/mempool/cnxk/version.map | 10 ++++
7 files changed, 135 insertions(+), 1 deletion(-)
create mode 100644 drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
create mode 100644 drivers/mempool/cnxk/version.map
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index c709fd48ad..a781b8f408 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -49,6 +49,7 @@ The public API headers are grouped by topics:
[iavf](@ref rte_pmd_iavf.h),
[bnxt](@ref rte_pmd_bnxt.h),
[cnxk](@ref rte_pmd_cnxk.h),
+ [cnxk_mempool](@ref rte_pmd_cnxk_mempool.h),
[dpaa](@ref rte_pmd_dpaa.h),
[dpaa2](@ref rte_pmd_dpaa2.h),
[mlx5](@ref rte_pmd_mlx5.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index d230a19e1f..7e68e43c64 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -9,6 +9,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
@TOPDIR@/drivers/crypto/scheduler \
@TOPDIR@/drivers/dma/dpaa2 \
@TOPDIR@/drivers/event/dlb2 \
+ @TOPDIR@/drivers/mempool/cnxk \
@TOPDIR@/drivers/mempool/dpaa2 \
@TOPDIR@/drivers/net/ark \
@TOPDIR@/drivers/net/bnxt \
diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
index 9238765155..b234481ec1 100644
--- a/drivers/mempool/cnxk/cn10k_hwpool_ops.c
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -3,11 +3,14 @@
*/
#include <rte_mempool.h>
+#include <rte_pmd_cnxk_mempool.h>
#include "roc_api.h"
#include "cnxk_mempool.h"
-#define CN10K_HWPOOL_MEM_SIZE 128
+#define CN10K_HWPOOL_MEM_SIZE 128
+#define CN10K_NPA_IOVA_RANGE_MIN 0x0
+#define CN10K_NPA_IOVA_RANGE_MAX 0x1fffffffffff80
static int __rte_hot
cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int n)
@@ -197,6 +200,64 @@ cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int max_objs,
return hp->size;
}
+int
+rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1, struct rte_mbuf *m2)
+{
+ struct rte_mempool_objhdr *hdr;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (!(CNXK_MEMPOOL_FLAGS(m1->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK) ||
+ !(CNXK_MEMPOOL_FLAGS(m2->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)) {
+ plt_err("Pools must have range check disabled");
+ return -EINVAL;
+ }
+ if (m1->pool->elt_size != m2->pool->elt_size ||
+ m1->pool->header_size != m2->pool->header_size ||
+ m1->pool->trailer_size != m2->pool->trailer_size ||
+ m1->pool->size != m2->pool->size) {
+ plt_err("Parameters of pools involved in exchange does not match");
+ return -EINVAL;
+ }
+#endif
+ RTE_SWAP(m1->pool, m2->pool);
+ hdr = rte_mempool_get_header(m1);
+ hdr->mp = m1->pool;
+ hdr = rte_mempool_get_header(m2);
+ hdr->mp = m2->pool;
+ return 0;
+}
+
+int
+rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp)
+{
+ return !!(CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_IS_HWPOOL);
+}
+
+int
+rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp)
+{
+ if (rte_pmd_cnxk_mempool_is_hwpool(mp)) {
+ /* Disable only aura range check for hardware pools */
+ roc_npa_aura_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
+ CN10K_NPA_IOVA_RANGE_MAX);
+ CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
+ mp = CNXK_MEMPOOL_CONFIG(mp);
+ }
+
+ /* No need to disable again if already disabled */
+ if (CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)
+ return 0;
+
+ /* Disable aura/pool range check */
+ roc_npa_pool_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
+ CN10K_NPA_IOVA_RANGE_MAX);
+ if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
+ return -EBUSY;
+
+ CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
+ return 0;
+}
+
static struct rte_mempool_ops cn10k_hwpool_ops = {
.name = "cn10k_hwpool_ops",
.alloc = cn10k_hwpool_alloc,
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
index 4ca05d53e1..669e617952 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -20,6 +20,10 @@ enum cnxk_mempool_flags {
* This flag is set by the driver.
*/
CNXK_MEMPOOL_F_IS_HWPOOL = RTE_BIT64(2),
+ /* This flag indicates whether range check has been disabled for
+ * the pool. This flag is set by the driver.
+ */
+ CNXK_MEMPOOL_F_NO_RANGE_CHECK = RTE_BIT64(3),
};
#define CNXK_MEMPOOL_F_MASK 0xFUL
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
index ce152bedd2..e388cce26a 100644
--- a/drivers/mempool/cnxk/meson.build
+++ b/drivers/mempool/cnxk/meson.build
@@ -17,5 +17,6 @@ sources = files(
'cn10k_hwpool_ops.c',
)
+headers = files('rte_pmd_cnxk_mempool.h')
deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
require_iova_in_mbuf = false
diff --git a/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h b/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
new file mode 100644
index 0000000000..ada6e7cd4d
--- /dev/null
+++ b/drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+/**
+ * @file rte_pmd_cnxk_mempool.h
+ * Marvell CNXK Mempool PMD specific functions.
+ *
+ **/
+
+#ifndef _PMD_CNXK_MEMPOOL_H_
+#define _PMD_CNXK_MEMPOOL_H_
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+
+/**
+ * Exchange mbufs between two mempools.
+ *
+ * @param m1
+ * First mbuf
+ * @param m2
+ * Second mbuf
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1,
+ struct rte_mbuf *m2);
+
+/**
+ * Check whether a mempool is a hwpool.
+ *
+ * @param mp
+ * Mempool to check.
+ *
+ * @return
+ * 1 if mp is a hwpool, 0 otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp);
+
+/**
+ * Disable buffer address range check on a mempool.
+ *
+ * @param mp
+ * Mempool to disable range check on.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+__rte_experimental
+int rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp);
+
+#endif /* _PMD_CNXK_MEMPOOL_H_ */
diff --git a/drivers/mempool/cnxk/version.map b/drivers/mempool/cnxk/version.map
new file mode 100644
index 0000000000..755731e3b5
--- /dev/null
+++ b/drivers/mempool/cnxk/version.map
@@ -0,0 +1,10 @@
+ DPDK_23 {
+ local: *;
+ };
+
+ EXPERIMENTAL {
+ global:
+ rte_pmd_cnxk_mempool_is_hwpool;
+ rte_pmd_cnxk_mempool_mbuf_exchange;
+ rte_pmd_cnxk_mempool_range_check_disable;
+ };
--
2.25.1
^ permalink raw reply [flat|nested] 22+ messages in thread