From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
"Shijith Thotton" <sthotton@marvell.com>
Cc: <dev@dpdk.org>, <stable@dpdk.org>
Subject: [PATCH] event/cnxk: fix stale data in workslots
Date: Mon, 25 Jul 2022 14:05:45 +0530
Message-ID: <20220725083545.2271-1-pbhagavatula@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Fix stale XAQ depth check pointers (xaq_lmt, fc_mem) left in workslot
memory after an XAQ pool resize by refreshing them from the device
private data.
Fixes: bd64a963d2fc ("event/cnxk: use common XAQ pool functions")
Cc: stable@dpdk.org
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
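Note for reviewers: below is a minimal sketch (not driver code) of the
re-sync pattern this patch applies after an XAQ pool resize. The struct
layouts and names are simplified, hypothetical stand-ins; only the cached
fields the fix refreshes are shown.

#include <stdint.h>

/* Hypothetical, simplified stand-ins for the device private data and a
 * workslot. */
struct dev_priv {
	uint64_t xaq_lmt; /* depth limit, updated on XAQ pool resize */
	uint64_t fc_iova; /* flow-control counter IOVA, updated on resize */
};

struct workslot {
	uint64_t xaq_lmt; /* per-workslot cached copy */
	uint64_t *fc_mem; /* per-workslot cached copy */
};

/* Refresh every workslot's cached copies so enqueue depth checks stop
 * using the pre-resize (stale) values. */
static void
resync_workslots(const struct dev_priv *dev, struct workslot **ports,
		 int nb_ports)
{
	int i;

	for (i = 0; i < nb_ports; i++) {
		ports[i]->xaq_lmt = dev->xaq_lmt;
		ports[i]->fc_mem = (uint64_t *)(uintptr_t)dev->fc_iova;
	}
}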
drivers/event/cnxk/cn10k_eventdev.c | 21 ++++++++++++++++---
drivers/event/cnxk/cn9k_eventdev.c | 31 +++++++++++++++++++++++------
drivers/event/cnxk/cnxk_tim_evdev.c | 6 +++++-
drivers/event/cnxk/cnxk_tim_evdev.h | 6 +++++-
4 files changed, 53 insertions(+), 11 deletions(-)
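The cnxk_tim_evdev.c/h changes add a callback so the common timer code can
trigger that re-sync without knowing the SoC-specific workslot layout. A
rough sketch of the indirection, again with simplified, hypothetical names
(the signature mirrors cnxk_sso_set_priv_mem_t from the patch):

#include <stddef.h>
#include <stdint.h>

typedef void (*set_priv_mem_cb_t)(const void *event_dev, void *lookup_mem,
				  uint64_t aura);

static set_priv_mem_cb_t priv_mem_cb; /* stored when timer caps are queried */

/* Each SoC driver (cn9k/cn10k) registers its own refresh routine. */
static void
register_priv_mem_cb(set_priv_mem_cb_t cb)
{
	priv_mem_cb = cb;
}

/* Called by the common timer code after the XAQ pool has been resized. */
static void
after_xaq_resize(const void *event_dev)
{
	if (priv_mem_cb != NULL)
		priv_mem_cb(event_dev, NULL, 0);
}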
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index db8dc2a9ce..ea6dadd7b7 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -701,8 +701,11 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, u
for (i = 0; i < dev->nb_event_ports; i++) {
struct cn10k_sso_hws *ws = event_dev->data->ports[i];
- ws->lookup_mem = lookup_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
+ ws->fc_mem = (uint64_t *)dev->fc_iova;
ws->tstamp = dev->tstamp;
+ if (lookup_mem)
+ ws->lookup_mem = lookup_mem;
if (meta_aura)
ws->meta_aura = meta_aura;
}
@@ -894,6 +897,7 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
const struct rte_event *event)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ int ret;
RTE_SET_USED(event);
@@ -903,7 +907,10 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
dev->is_ca_internal_port = 1;
cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
- return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+ ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+ cn10k_sso_set_priv_mem(event_dev, NULL, 0);
+
+ return ret;
}
static int
@@ -917,6 +924,14 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}
+static int
+cn10k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
+ uint32_t *caps, const struct event_timer_adapter_ops **ops)
+{
+ return cnxk_tim_caps_get(evdev, flags, caps, ops,
+ cn10k_sso_set_priv_mem);
+}
+
static struct eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
@@ -950,7 +965,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.eth_tx_adapter_stop = cnxk_sso_tx_adapter_stop,
.eth_tx_adapter_free = cnxk_sso_tx_adapter_free,
- .timer_adapter_caps_get = cnxk_tim_caps_get,
+ .timer_adapter_caps_get = cn10k_tim_caps_get,
.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 992a2a555c..5d527c3be8 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -942,7 +942,8 @@ cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
}
static void
-cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
+ uint64_t aura __rte_unused)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int i;
@@ -951,12 +952,18 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
if (dev->dual_ws) {
struct cn9k_sso_hws_dual *dws =
event_dev->data->ports[i];
- dws->lookup_mem = lookup_mem;
+ dws->xaq_lmt = dev->xaq_lmt;
+ dws->fc_mem = (uint64_t *)dev->fc_iova;
dws->tstamp = dev->tstamp;
+ if (lookup_mem)
+ dws->lookup_mem = lookup_mem;
} else {
struct cn9k_sso_hws *ws = event_dev->data->ports[i];
- ws->lookup_mem = lookup_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
+ ws->fc_mem = (uint64_t *)dev->fc_iova;
ws->tstamp = dev->tstamp;
+ if (lookup_mem)
+ ws->lookup_mem = lookup_mem;
}
}
}
@@ -982,7 +989,7 @@ cn9k_sso_rx_adapter_queue_add(
rxq = eth_dev->data->rx_queues[0];
lookup_mem = rxq->lookup_mem;
- cn9k_sso_set_priv_mem(event_dev, lookup_mem);
+ cn9k_sso_set_priv_mem(event_dev, lookup_mem, 0);
cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
@@ -1121,6 +1128,7 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
int32_t queue_pair_id, const struct rte_event *event)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ int ret;
RTE_SET_USED(event);
@@ -1130,7 +1138,10 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
dev->is_ca_internal_port = 1;
cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
- return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+ ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
+ cn9k_sso_set_priv_mem(event_dev, NULL, 0);
+
+ return ret;
}
static int
@@ -1144,6 +1155,14 @@ cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}
+static int
+cn9k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
+ uint32_t *caps, const struct event_timer_adapter_ops **ops)
+{
+ return cnxk_tim_caps_get(evdev, flags, caps, ops,
+ cn9k_sso_set_priv_mem);
+}
+
static struct eventdev_ops cn9k_sso_dev_ops = {
.dev_infos_get = cn9k_sso_info_get,
.dev_configure = cn9k_sso_dev_configure,
@@ -1175,7 +1194,7 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
.eth_tx_adapter_stop = cnxk_sso_tx_adapter_stop,
.eth_tx_adapter_free = cnxk_sso_tx_adapter_free,
- .timer_adapter_caps_get = cnxk_tim_caps_get,
+ .timer_adapter_caps_get = cn9k_tim_caps_get,
.crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index f8a536e71a..5dd79cbd47 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -8,6 +8,7 @@
#include "cnxk_tim_evdev.h"
static struct event_timer_adapter_ops cnxk_tim_ops;
+static cnxk_sso_set_priv_mem_t sso_set_priv_mem_fn;
static int
cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
@@ -265,6 +266,7 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
cnxk_sso_updt_xae_cnt(cnxk_sso_pmd_priv(dev->event_dev), tim_ring,
RTE_EVENT_TYPE_TIMER);
cnxk_sso_xae_reconfigure(dev->event_dev);
+ sso_set_priv_mem_fn(dev->event_dev, NULL, 0);
plt_tim_dbg(
"Total memory used %" PRIu64 "MB\n",
@@ -375,7 +377,8 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
int
cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
- uint32_t *caps, const struct event_timer_adapter_ops **ops)
+ uint32_t *caps, const struct event_timer_adapter_ops **ops,
+ cnxk_sso_set_priv_mem_t priv_mem_fn)
{
struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
@@ -389,6 +392,7 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
cnxk_tim_ops.start = cnxk_tim_ring_start;
cnxk_tim_ops.stop = cnxk_tim_ring_stop;
cnxk_tim_ops.get_info = cnxk_tim_ring_info_get;
+ sso_set_priv_mem_fn = priv_mem_fn;
if (dev->enable_stats) {
cnxk_tim_ops.stats_get = cnxk_tim_stats_get;
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index 0fda9f4f13..0c192346c7 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -78,6 +78,9 @@
#define TIM_BUCKET_SEMA_WLOCK \
(TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
+typedef void (*cnxk_sso_set_priv_mem_t)(const struct rte_eventdev *event_dev,
+ void *lookup_mem, uint64_t aura);
+
struct cnxk_tim_ctl {
uint16_t ring;
uint16_t chunk_slots;
@@ -317,7 +320,8 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps,
- const struct event_timer_adapter_ops **ops);
+ const struct event_timer_adapter_ops **ops,
+ cnxk_sso_set_priv_mem_t priv_mem_fn);
void cnxk_tim_init(struct roc_sso *sso);
void cnxk_tim_fini(void);
--
2.35.1