patches for DPDK stable branches
From: Shijith Thotton <sthotton@marvell.com>
To: <stable@dpdk.org>
Cc: Pavan Nikhilesh <pbhagavatula@marvell.com>, <xuemingl@nvidia.com>
Subject: [dpdk-stable] [PATCH 20.11 3/3] event/octeontx2: fix XAQ pool reconfigure
Date: Thu, 17 Jun 2021 00:12:10 +0530
Message-ID: <c110ea6e5786321cd53e0b69bf3dfa84e28c8801.1623868710.git.sthotton@marvell.com>
In-Reply-To: <cover.1623868710.git.sthotton@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

[ upstream commit 052a5d38676373971cf2840c695a1421d2ac3045 ]

When the XAQ pool is reconfigured, the same memzone may be reused for
fc_mem. If the old mempool is freed after the new one has been set up,
its teardown path writes the old pool's free count into fc_mem,
corrupting the value the new pool just initialized. Release the XAQ
buffers held by the hardware groups and free the old mempool before
allocating its replacement.
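
As an illustrative aside (not part of the patch), a minimal standalone
sketch of the aliasing hazard; the names are hypothetical stand-ins for
the driver's shared memzone and flow-control word:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical stand-in for the memzone backing fc_mem. */
  static uint64_t shared_memzone;

  int main(void)
  {
          uint64_t *old_fc = &shared_memzone; /* old pool's fc_mem */
          uint64_t *new_fc = &shared_memzone; /* new pool reuses the memzone */

          *new_fc = 0;  /* new pool zeroes its flow-control count */
          *old_fc = 42; /* late free of the old pool writes its free count */

          /* Prints 42, not 0: the stale pointer clobbered the new count. */
          printf("fc_mem = %" PRIu64 "\n", *new_fc);
          return 0;
  }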

Fixes: ffa4ec0b6063 ("event/octeontx2: allow adapters to resize inflight buffers")

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/common/octeontx2/otx2_mbox.h |  7 +++++++
 drivers/event/octeontx2/otx2_evdev.c | 31 ++++++++++++++++++++++------
 2 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/drivers/common/octeontx2/otx2_mbox.h b/drivers/common/octeontx2/otx2_mbox.h
index f6d884c19..51b25ab42 100644
--- a/drivers/common/octeontx2/otx2_mbox.h
+++ b/drivers/common/octeontx2/otx2_mbox.h
@@ -177,6 +177,8 @@ M(SSO_GRP_GET_STATS,	0x609, sso_grp_get_stats, sso_info_req,		\
 				sso_grp_stats)				\
 M(SSO_HWS_GET_STATS,	0x610, sso_hws_get_stats, sso_info_req,		\
 				sso_hws_stats)				\
+M(SSO_HW_RELEASE_XAQ,	0x611, sso_hw_release_xaq_aura,			\
+				sso_release_xaq, msg_rsp)		\
 /* TIM mbox IDs (range 0x800 - 0x9FF) */				\
 M(TIM_LF_ALLOC,		0x800, tim_lf_alloc, tim_lf_alloc_req,		\
 				tim_lf_alloc_rsp)			\
@@ -1177,6 +1179,11 @@ struct sso_hw_setconfig {
 	uint16_t __otx2_io hwgrps;
 };
 
+struct sso_release_xaq {
+	struct mbox_msghdr hdr;
+	uint16_t __otx2_io hwgrps;
+};
+
 struct sso_info_req {
 	struct mbox_msghdr hdr;
 	union {
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 3afb5a30e..dd75b9f85 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -981,7 +981,7 @@ sso_xaq_allocate(struct otx2_sso_evdev *dev)
 
 	dev->fc_iova = mz->iova;
 	dev->fc_mem = mz->addr;
-
+	*dev->fc_mem = 0;
 	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
 	memset(aura, 0, sizeof(struct npa_aura_s));
 
@@ -1057,6 +1057,19 @@ sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
 	return otx2_mbox_process(mbox);
 }
 
+static int
+sso_ggrp_free_xaq(struct otx2_sso_evdev *dev)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct sso_release_xaq *req;
+
+	otx2_sso_dbg("Freeing XAQ for GGRPs");
+	req = otx2_mbox_alloc_msg_sso_hw_release_xaq_aura(mbox);
+	req->hwgrps = dev->nb_event_queues;
+
+	return otx2_mbox_process(mbox);
+}
+
 static void
 sso_lf_teardown(struct otx2_sso_evdev *dev,
 		enum otx2_sso_lf_type lf_type)
@@ -1447,6 +1460,8 @@ sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
 			ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
 			ws->swtag_req = 0;
 			ws->vws = 0;
+			ws->fc_mem = dev->fc_mem;
+			ws->xaq_lmt = dev->xaq_lmt;
 			ws->ws_state[0].cur_grp = 0;
 			ws->ws_state[0].cur_tt = SSO_SYNC_EMPTY;
 			ws->ws_state[1].cur_grp = 0;
@@ -1457,6 +1472,8 @@ sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
 			ws = event_dev->data->ports[i];
 			ssogws_reset(ws);
 			ws->swtag_req = 0;
+			ws->fc_mem = dev->fc_mem;
+			ws->xaq_lmt = dev->xaq_lmt;
 			ws->cur_grp = 0;
 			ws->cur_tt = SSO_SYNC_EMPTY;
 		}
@@ -1503,28 +1520,30 @@ int
 sso_xae_reconfigure(struct rte_eventdev *event_dev)
 {
 	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
-	struct rte_mempool *prev_xaq_pool;
 	int rc = 0;
 
 	if (event_dev->data->dev_started)
 		sso_cleanup(event_dev, 0);
 
-	prev_xaq_pool = dev->xaq_pool;
+	rc = sso_ggrp_free_xaq(dev);
+	if (rc < 0) {
+		otx2_err("Failed to free XAQ\n");
+		return rc;
+	}
+
+	rte_mempool_free(dev->xaq_pool);
 	dev->xaq_pool = NULL;
 	rc = sso_xaq_allocate(dev);
 	if (rc < 0) {
 		otx2_err("Failed to alloc xaq pool %d", rc);
-		rte_mempool_free(prev_xaq_pool);
 		return rc;
 	}
 	rc = sso_ggrp_alloc_xaq(dev);
 	if (rc < 0) {
 		otx2_err("Failed to alloc xaq to ggrp %d", rc);
-		rte_mempool_free(prev_xaq_pool);
 		return rc;
 	}
 
-	rte_mempool_free(prev_xaq_pool);
 	rte_mb();
 	if (event_dev->data->dev_started)
 		sso_cleanup(event_dev, 1);
-- 
2.25.1


Thread overview: 5 messages
2021-06-16 18:42 [dpdk-stable] [PATCH 20.11 0/3] backport of octeontx2 patches Shijith Thotton
2021-06-16 18:42 ` [dpdk-stable] [PATCH 20.11 1/3] event/octeontx2: fix crypto adapter queue pair operations Shijith Thotton
2021-06-16 18:42 ` [dpdk-stable] [PATCH 20.11 2/3] event/octeontx2: configure crypto adapter xaq pool Shijith Thotton
2021-06-16 18:42 ` [dpdk-stable] [PATCH 20.11 3/3] event/octeontx2: fix XAQ pool reconfigure Shijith Thotton [this message]
2021-06-17  2:54 ` [dpdk-stable] [PATCH 20.11 0/3] backport of octeontx2 patches Xueming(Steven) Li
