* [PATCH 1/2] event/cnxk: remove deschedule usage in CN9K
@ 2022-02-19 12:13 pbhagavatula
From: pbhagavatula @ 2022-02-19 12:13 UTC
To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Using the deschedule command might cause updates to the WQE and GGRP
fields to be incorrectly ignored on CN9K.
Use add_work to pipeline work instead.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
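Notes: the hunks below replace the SWTAG_DESCHED-based group switch
with an ADDWORK-based one. As a reading aid, here is a minimal
standalone sketch of the new path; it reuses only the driver symbols
touched by this patch (fc_mem, xaq_lmt, grp_base,
roc_sso_hws_head_wait(), cnxk_sso_hws_add_work()), while the wrapper
function itself is hypothetical:

static __rte_always_inline void
fwd_to_new_group_sketch(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint16_t grp = ev->queue_id;

	/* Make prior stores to the event payload visible before handoff. */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	/* Wait until this workslot holds the head of the tag chain so the
	 * event is re-injected into the new group in order. */
	roc_sso_hws_head_wait(ws->base);
	/* ADDWORK consumes an XAQ entry; spin while the in-flight count is
	 * at the configured limit to avoid XAQ pool exhaustion. */
	while (ws->xaq_lmt <= __atomic_load_n(ws->fc_mem, __ATOMIC_RELAXED))
		;
	cnxk_sso_hws_add_work(ev->u64, (uint32_t)ev->event, ev->sched_type,
			      ws->grp_base + (grp << 12));
}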
drivers/event/cnxk/cn9k_worker.h | 41 +++++++++++++++++++++++++-------
1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 79374b8d95..0905d744cc 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -63,15 +63,18 @@ cn9k_sso_hws_fwd_swtag(uint64_t base, const struct rte_event *ev)
}
static __rte_always_inline void
-cn9k_sso_hws_fwd_group(uint64_t base, const struct rte_event *ev,
- const uint16_t grp)
+cn9k_sso_hws_new_event_wait(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
+ const uint64_t event_ptr = ev->u64;
+ const uint16_t grp = ev->queue_id;
- plt_write64(ev->u64, base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
- cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
- base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
+ while (ws->xaq_lmt <= __atomic_load_n(ws->fc_mem, __ATOMIC_RELAXED))
+ ;
+
+ cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
+ ws->grp_base + (grp << 12));
}
static __rte_always_inline void
@@ -86,10 +89,12 @@ cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
} else {
/*
* Group has been changed for group based work pipelining,
- * Use deschedule/add_work operation to transfer the event to
+ * Use add_work operation to transfer the event to
* new group/core
*/
- cn9k_sso_hws_fwd_group(ws->base, ev, grp);
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ roc_sso_hws_head_wait(ws->base);
+ cn9k_sso_hws_new_event_wait(ws, ev);
}
}
@@ -113,6 +118,22 @@ cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
return 1;
}
+static __rte_always_inline void
+cn9k_sso_hws_dual_new_event_wait(struct cn9k_sso_hws_dual *dws,
+ const struct rte_event *ev)
+{
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint64_t event_ptr = ev->u64;
+ const uint16_t grp = ev->queue_id;
+
+ while (dws->xaq_lmt <= __atomic_load_n(dws->fc_mem, __ATOMIC_RELAXED))
+ ;
+
+ cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
+ dws->grp_base + (grp << 12));
+}
+
static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
const struct rte_event *ev)
@@ -126,10 +147,12 @@ cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
} else {
/*
* Group has been changed for group based work pipelining,
- * Use deschedule/add_work operation to transfer the event to
+ * Use add_work operation to transfer the event to
* new group/core
*/
- cn9k_sso_hws_fwd_group(base, ev, grp);
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+ roc_sso_hws_head_wait(base);
+ cn9k_sso_hws_dual_new_event_wait(dws, ev);
}
}
--
2.17.1
* [PATCH 2/2] event/cnxk: update SQB fc check for Tx adapter
From: pbhagavatula @ 2022-02-19 12:13 UTC
To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Update the SQB limit to account for the CPT queue size when security
offload is enabled.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
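Notes: the updated flow-control limit effectively reserves one SQE per
SQB for the next-buffer pointer, subtracts the CPT outbound descriptors
when security offload is enabled, and then keeps 30% headroom. A worked
example of the computation with hypothetical values (none of these
numbers are driver defaults):

/* Example inputs: 512 SQBs, 32 SQEs per SQB, 64 CPT outbound descriptors. */
uint16_t nb_sqb_bufs = 512;	/* sq->nb_sqb_bufs */
uint16_t sqes_per_sqb = 32;	/* 1U << txq->sqes_per_sqb_log2 */
uint16_t outb_nb_desc = 64;	/* cnxk_eth_dev->outb.nb_desc */
uint16_t adj;

/* The last SQE of each SQB points to the next SQB; in SQB units this
 * reserves nb_sqb_bufs / sqes_per_sqb buffers: 512 - 512 / 32 = 496. */
adj = nb_sqb_bufs -
      RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
/* CPT can hold outb_nb_desc packets in flight; each SQB carries
 * sqes_per_sqb - 1 usable SQEs: 496 - 64 / 31 = 494. */
adj -= outb_nb_desc / (sqes_per_sqb - 1);
/* Keep 30% headroom so the fc_mem check trips before the SQ fills:
 * (70 * 494) / 100 = 345. */
adj = (70 * adj) / 100;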
drivers/event/cnxk/cn10k_eventdev.c | 30 +++++++++++++++++++
drivers/event/cnxk/cn10k_worker.h | 18 +++++------
drivers/event/cnxk/cn9k_eventdev.c | 28 ++++++++---------
drivers/event/cnxk/cn9k_worker.h | 5 ++--
drivers/event/cnxk/cnxk_eventdev.h | 1 -
drivers/event/cnxk/cnxk_eventdev_adptr.c | 38 +++++++++++++++++++-----
6 files changed, 86 insertions(+), 34 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 26d65e3568..24f3a5908c 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -717,6 +717,35 @@ cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
return 0;
}
+static void
+cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cn10k_eth_txq *txq;
+ struct roc_nix_sq *sq;
+ int i;
+
+ if (tx_queue_id < 0) {
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ cn10k_sso_txq_fc_update(eth_dev, i);
+ } else {
+ uint16_t sqes_per_sqb;
+
+ sq = &cnxk_eth_dev->sqs[tx_queue_id];
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+ sq->nb_sqb_bufs_adj =
+ sq->nb_sqb_bufs -
+ RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+ sqes_per_sqb;
+ if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+ (sqes_per_sqb - 1));
+ txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+ txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+ }
+}
+
static int
cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
@@ -746,6 +775,7 @@ cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
}
dev->tx_offloads |= tx_offloads;
+ cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);
rc = cn10k_sso_updt_tx_adptr_data(event_dev);
if (rc < 0)
return rc;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index cfe729cef9..bb32ef75ef 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -470,6 +470,14 @@ cn10k_sso_hws_xtract_meta(struct rte_mbuf *m, const uint64_t *txq_data)
(BIT_ULL(48) - 1));
}
+static __rte_always_inline void
+cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
+{
+ while ((uint64_t)txq->nb_sqb_bufs_adj <=
+ __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
+ ;
+}
+
static __rte_always_inline void
cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
uint16_t lmt_id, uintptr_t lmt_addr, uint8_t sched_type,
@@ -517,6 +525,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
if (!CNXK_TAG_IS_HEAD(ws->gw_rdata) && !sched_type)
ws->gw_rdata = roc_sso_hws_head_wait(ws->base);
+ cn10k_sso_txq_fc_wait(txq);
roc_lmt_submit_steorl(lmt_id, pa);
}
@@ -577,7 +586,6 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
struct cn10k_eth_txq *txq;
struct rte_mbuf *m;
uintptr_t lmt_addr;
- uint16_t ref_cnt;
uint16_t lmt_id;
lmt_addr = ws->lmt_base;
@@ -607,17 +615,9 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
}
m = ev->mbuf;
- ref_cnt = m->refcnt;
cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data,
flags);
- if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
- if (ref_cnt > 1)
- return 1;
- }
-
- cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
- ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
return 1;
}
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 6d3d03c97c..8e55961ddc 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -955,8 +955,7 @@ cn9k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
}
static void
-cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
- bool ena)
+cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct cn9k_eth_txq *txq;
@@ -965,20 +964,21 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id,
if (tx_queue_id < 0) {
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- cn9k_sso_txq_fc_update(eth_dev, i, ena);
+ cn9k_sso_txq_fc_update(eth_dev, i);
} else {
- uint16_t sq_limit;
+ uint16_t sqes_per_sqb;
sq = &cnxk_eth_dev->sqs[tx_queue_id];
txq = eth_dev->data->tx_queues[tx_queue_id];
- sq_limit =
- ena ? RTE_MIN(CNXK_SSO_SQB_LIMIT, sq->aura_sqb_bufs) :
- sq->nb_sqb_bufs;
- txq->nb_sqb_bufs_adj =
- sq_limit -
- RTE_ALIGN_MUL_CEIL(sq_limit,
- (1ULL << txq->sqes_per_sqb_log2)) /
- (1ULL << txq->sqes_per_sqb_log2);
+ sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
+ sq->nb_sqb_bufs_adj =
+ sq->nb_sqb_bufs -
+ RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
+ sqes_per_sqb;
+ if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
+ (sqes_per_sqb - 1));
+ txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
}
}
@@ -1012,7 +1012,7 @@ cn9k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
}
dev->tx_offloads |= tx_offloads;
- cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, true);
+ cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
rc = cn9k_sso_updt_tx_adptr_data(event_dev);
if (rc < 0)
return rc;
@@ -1033,7 +1033,7 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
if (rc < 0)
return rc;
- cn9k_sso_txq_fc_update(eth_dev, tx_queue_id, false);
+ cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);
return cn9k_sso_updt_tx_adptr_data(event_dev);
}
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 0905d744cc..79b2b3809f 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -613,9 +613,8 @@ NIX_RX_FASTPATH_MODES
static __rte_always_inline void
cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
{
- while (!((txq->nb_sqb_bufs_adj -
- __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
- << (txq)->sqes_per_sqb_log2))
+ while ((uint64_t)txq->nb_sqb_bufs_adj <=
+ __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
;
}
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index e3b5ffa7eb..b157fef096 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -38,7 +38,6 @@
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)
#define CNXK_SSO_WQE_SG_PTR (9)
-#define CNXK_SSO_SQB_LIMIT (0x180)
#define CNXK_TT_FROM_TAG(x) (((x) >> 32) & SSO_TT_EMPTY)
#define CNXK_TT_FROM_EVENT(x) (((x) >> 38) & SSO_TT_EMPTY)
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 5ebd3340e7..7b580ca98f 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -335,8 +335,18 @@ cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
static int
cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
{
- return roc_npa_aura_limit_modify(
- sq->aura_handle, RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+ int rc;
+
+ if (sq->nb_sqb_bufs != nb_sqb_bufs) {
+ rc = roc_npa_aura_limit_modify(
+ sq->aura_handle,
+ RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
+ if (rc < 0)
+ return rc;
+
+ sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);
+ }
+ return 0;
}
static void
@@ -522,22 +532,29 @@ cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct roc_nix_sq *sq;
- int i, ret;
+ int i, ret = 0;
void *txq;
if (tx_queue_id < 0) {
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, i);
+ ret |= cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev,
+ i);
} else {
txq = eth_dev->data->tx_queues[tx_queue_id];
sq = &cnxk_eth_dev->sqs[tx_queue_id];
- cnxk_sso_sqb_aura_limit_edit(sq, CNXK_SSO_SQB_LIMIT);
+ cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
ret = cnxk_sso_updt_tx_queue_data(
event_dev, eth_dev->data->port_id, tx_queue_id, txq);
if (ret < 0)
return ret;
}
+ if (ret < 0) {
+ plt_err("Failed to configure Tx adapter port=%d, q=%d",
+ eth_dev->data->port_id, tx_queue_id);
+ return ret;
+ }
+
return 0;
}
@@ -548,12 +565,13 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct roc_nix_sq *sq;
- int i, ret;
+ int i, ret = 0;
RTE_SET_USED(event_dev);
if (tx_queue_id < 0) {
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, i);
+ ret |= cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev,
+ i);
} else {
sq = &cnxk_eth_dev->sqs[tx_queue_id];
cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
@@ -563,5 +581,11 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
return ret;
}
+ if (ret < 0) {
+ plt_err("Failed to clear Tx adapter config port=%d, q=%d",
+ eth_dev->data->port_id, tx_queue_id);
+ return ret;
+ }
+
return 0;
}
--
2.17.1
* Re: [PATCH 1/2] event/cnxk: remove deschedule usage in CN9K
From: Jerin Jacob @ 2022-02-22 9:51 UTC
To: Pavan Nikhilesh; +Cc: Jerin Jacob, Shijith Thotton, dpdk-dev
On Sat, Feb 19, 2022 at 6:05 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Using the deschedule command might cause updates to the WQE and GGRP
> fields to be incorrectly ignored on CN9K.
> Use add_work to pipeline work instead.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Series applied to dpdk-next-net-eventdev/for-main. Thanks