From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
"Kiran Kumar K" <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>, Ray Kinsella <mdr@ashroe.eu>,
"Pavan Nikhilesh" <pbhagavatula@marvell.com>,
Shijith Thotton <sthotton@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free
Date: Wed, 3 Nov 2021 06:22:09 +0530
Message-ID: <20211103005213.2066-1-pbhagavatula@marvell.com>
In-Reply-To: <20210902070034.1086-1-pbhagavatula@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add a common API to create and free the SSO XAQ pool.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
v2 Changes:
- Merge patchsets 19356 and 18614 to avoid merge conflicts.
- Rebase onto main.
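
A minimal caller sketch of the new public API, for reviewers (not part of
this patch; the function name is hypothetical, error handling is
abbreviated, and "roc_api.h" is assumed to be included). It assumes
roc_sso has already been initialized, e.g. via roc_sso_rsrc_init(), so
that xae_waes, xaq_buf_size and nb_hwgrp are valid:

static int
sso_xaq_example(struct roc_sso *roc_sso, uint32_t nb_events)
{
	int rc;

	/* Size and create the XAQ aura/pool for nb_events entries. */
	rc = roc_sso_hwgrp_init_xaq_aura(roc_sso, nb_events);
	if (rc < 0)
		return rc;

	/* Attach the aura to the HWGRPs so SSO can start caching XAQs. */
	rc = roc_sso_hwgrp_alloc_xaq(roc_sso,
				     roc_npa_aura_handle_to_aura(
					     roc_sso->xaq.aura_handle),
				     roc_sso->nb_hwgrp);
	if (rc < 0)
		roc_sso_hwgrp_free_xaq_aura(roc_sso, 0);

	return rc;
}

Teardown is roc_sso_hwgrp_free_xaq_aura(roc_sso, roc_sso->nb_hwgrp), which
releases the XAQs from the HWGRPs, destroys the NPA pool and frees the
backing memory.
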
drivers/common/cnxk/roc_sso.c | 124 ++++++++++++++++++++++++++++
drivers/common/cnxk/roc_sso.h | 14 ++++
drivers/common/cnxk/roc_sso_priv.h | 5 ++
drivers/common/cnxk/version.map | 2 +
drivers/event/cnxk/cn10k_eventdev.c | 2 +
5 files changed, 147 insertions(+)
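
To illustrate the sizing done in sso_hwgrp_init_xaq_aura() per HRM
14.3.3(4) (the numbers below are hypothetical; xae_waes is the number of
work entries each XAQ buffer holds):

  nb_xae = 1000, xae_waes = 32, nb_hwgrp = 2
  cache XAQs = SSO_XAQ_CACHE_CNT * nb_hwgrp           = 7 * 2 = 14
  data XAQs  = max(1 + (nb_xae - 1) / xae_waes, 14)   = max(32, 14) = 32
  nb_xaq     = 14 + 32                                = 46
  xaq_lmt    = nb_xaq - cache XAQs                    = 32

The 14-buffer headroom (7 per HWGRP) lets SSO prefetch XAQs into its cache
even before software enqueues any work.
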
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 762893f3dc..45ff16ca0e 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -5,6 +5,8 @@
#include "roc_api.h"
#include "roc_priv.h"
+#define SSO_XAQ_CACHE_CNT (0x7)
+
/* Private functions. */
int
sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
@@ -387,6 +389,128 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
return mbox_process(dev->mbox);
}
+int
+sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+ uint32_t nb_xae, uint32_t xae_waes,
+ uint32_t xaq_buf_size, uint16_t nb_hwgrp)
+{
+ struct npa_pool_s pool;
+ struct npa_aura_s aura;
+ plt_iova_t iova;
+ uint32_t i;
+ int rc;
+
+ if (xaq->mem != NULL) {
+ rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
+ if (rc < 0) {
+ plt_err("Failed to release XAQ %d", rc);
+ return rc;
+ }
+ roc_npa_pool_destroy(xaq->aura_handle);
+ plt_free(xaq->fc);
+ plt_free(xaq->mem);
+ memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+ }
+
+ xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
+ if (xaq->fc == NULL) {
+ plt_err("Failed to allocate XAQ FC");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ xaq->nb_xae = nb_xae;
+
+ /* Taken from HRM 14.3.3(4) */
+ xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
+ xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
+
+ xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
+ if (xaq->mem == NULL) {
+ plt_err("Failed to allocate XAQ mem");
+ rc = -ENOMEM;
+ goto free_fc;
+ }
+
+ memset(&pool, 0, sizeof(struct npa_pool_s));
+ pool.nat_align = 1;
+
+ memset(&aura, 0, sizeof(aura));
+ aura.fc_ena = 1;
+ aura.fc_addr = (uint64_t)xaq->fc;
+ aura.fc_hyst_bits = 0; /* Store count on all updates */
+ rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
+ &aura, &pool);
+ if (rc) {
+ plt_err("Failed to create XAQ pool");
+ goto npa_fail;
+ }
+
+ iova = (uint64_t)xaq->mem;
+ for (i = 0; i < xaq->nb_xaq; i++) {
+ roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
+ iova += xaq_buf_size;
+ }
+ roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+
+ /* When SW does addwork (enqueue) check if there is space in XAQ by
+ * comparing fc_addr above against the xaq_lmt calculated below.
+ * There should be a minimum headroom of 7 XAQs per HWGRP for SSO
+ * to request XAQ to cache them even before enqueue is called.
+ */
+ xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT);
+
+ return 0;
+npa_fail:
+ plt_free(xaq->mem);
+free_fc:
+ plt_free(xaq->fc);
+fail:
+ memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+ return rc;
+}
+
+int
+roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
+{
+ struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+
+ return sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
+ roc_sso->xae_waes, roc_sso->xaq_buf_size,
+ roc_sso->nb_hwgrp);
+}
+
+int
+sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+ uint16_t nb_hwgrp)
+{
+ int rc;
+
+ if (xaq->mem != NULL) {
+ if (nb_hwgrp) {
+ rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
+ if (rc < 0) {
+ plt_err("Failed to release XAQ %d", rc);
+ return rc;
+ }
+ }
+ roc_npa_pool_destroy(xaq->aura_handle);
+ plt_free(xaq->fc);
+ plt_free(xaq->mem);
+ }
+ memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+
+ return 0;
+}
+
+int
+roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
+{
+ struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+
+ return sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
+}
+
int
sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
{
diff --git a/drivers/common/cnxk/roc_sso.h b/drivers/common/cnxk/roc_sso.h
index b28f6089cc..27d49c6c68 100644
--- a/drivers/common/cnxk/roc_sso.h
+++ b/drivers/common/cnxk/roc_sso.h
@@ -27,6 +27,15 @@ struct roc_sso_hwgrp_stats {
uint64_t page_cnt;
};
+struct roc_sso_xaq_data {
+ uint32_t nb_xaq;
+ uint32_t nb_xae;
+ uint32_t xaq_lmt;
+ uint64_t aura_handle;
+ void *fc;
+ void *mem;
+};
+
struct roc_sso {
struct plt_pci_device *pci_dev;
/* Public data. */
@@ -35,6 +44,7 @@ struct roc_sso {
uint16_t nb_hwgrp;
uint8_t nb_hws;
uintptr_t lmt_base;
+ struct roc_sso_xaq_data xaq;
/* HW Const. */
uint32_t xae_waes;
uint32_t xaq_buf_size;
@@ -95,6 +105,10 @@ int __roc_api roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso,
uintptr_t __roc_api roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws);
uintptr_t __roc_api roc_sso_hwgrp_base_get(struct roc_sso *roc_sso,
uint16_t hwgrp);
+int __roc_api roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso,
+ uint32_t nb_xae);
+int __roc_api roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso,
+ uint16_t nb_hwgrp);
/* Debug */
void __roc_api roc_sso_dump(struct roc_sso *roc_sso, uint8_t nb_hws,
diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h
index 8dffa3fbf4..2e1b025d1c 100644
--- a/drivers/common/cnxk/roc_sso_priv.h
+++ b/drivers/common/cnxk/roc_sso_priv.h
@@ -47,6 +47,11 @@ void sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp,
uint16_t hwgrp[], uint16_t n, uint16_t enable);
int sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps);
int sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps);
+int sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+ uint32_t nb_xae, uint32_t xae_waes,
+ uint32_t xaq_buf_size, uint16_t nb_hwgrp);
+int sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+ uint16_t nb_hwgrp);
/* SSO IRQ */
int sso_register_irqs_priv(struct roc_sso *roc_sso,
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 8d4d42f476..cbf4a4137b 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -310,7 +310,9 @@ INTERNAL {
roc_sso_dump;
roc_sso_hwgrp_alloc_xaq;
roc_sso_hwgrp_base_get;
+ roc_sso_hwgrp_free_xaq_aura;
roc_sso_hwgrp_hws_link_status;
+ roc_sso_hwgrp_init_xaq_aura;
roc_sso_hwgrp_qos_config;
roc_sso_hwgrp_release_xaq;
roc_sso_hwgrp_set_priority;
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index e287448189..2fb4ea878e 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -132,6 +132,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
plt_write64(0, base + SSO_LF_GGRP_QCTL);
+ plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
req = queue_id; /* GGRP ID */
req |= BIT_ULL(18); /* Grouped */
req |= BIT_ULL(16); /* WAIT */
@@ -177,6 +178,7 @@ cn10k_sso_hws_reset(void *arg, void *hws)
} gw;
uint8_t pend_tt;
+ plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
/* Wait till getwork/swtp/waitw/desched completes. */
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
--
2.17.1