From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
"Shijith Thotton" <sthotton@marvell.com>,
Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>
Subject: [PATCH 1/3] event/cnxk: align TX queue buffer adjustment
Date: Tue, 16 May 2023 20:07:50 +0530
Message-ID: <20230516143752.4941-1-pbhagavatula@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Remove the recalculation of SQB thresholds during Tx queue buffer
adjustment. The adjustment is already done during Tx queue setup.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
Depends-on: series-27660
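
Editor's note (not part of the patch): the hunks below replace the old
depth expression "(nb_sqb_bufs_adj - *fc_mem) << sqes_per_sqb_log2" with
the shift-and-subtract form "(avail << sqes_per_sqb_log2) - avail",
which is avail * (sqes_per_sqb - 1). The sketch below only illustrates
that arithmetic; the field values are made up, and it assumes, as the
new code appears to, that one SQE slot per SQB is not counted toward
usable depth.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical txq state for illustration only */
        int32_t nb_sqb_bufs_adj = 512;  /* adjusted SQB count */
        int32_t fc_mem = 100;           /* SQBs currently in use */
        uint16_t sqes_per_sqb_log2 = 5; /* 32 SQEs per SQB */

        /* Free SQBs */
        int32_t avail = nb_sqb_bufs_adj - fc_mem;

        /* Shift-and-subtract form used by the patch */
        int32_t depth = (avail << sqes_per_sqb_log2) - avail;

        /* Equivalent multiplication, kept for comparison */
        int32_t check = avail * ((1 << sqes_per_sqb_log2) - 1);

        printf("depth=%d check=%d\n", depth, check); /* both 12772 */
        return 0;
    }
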
drivers/event/cnxk/cn10k_eventdev.c | 9 +--------
drivers/event/cnxk/cn10k_tx_worker.h | 6 +++---
drivers/event/cnxk/cn9k_eventdev.c | 9 +--------
drivers/event/cnxk/cn9k_worker.h | 12 +++++++++---
drivers/net/cnxk/cn10k_tx.h | 12 ++++++------
drivers/net/cnxk/cn9k_tx.h | 5 +++--
6 files changed, 23 insertions(+), 30 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 89f32c4d1e..f7c6a83ff0 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -840,16 +840,9 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
sq = &cnxk_eth_dev->sqs[tx_queue_id];
txq = eth_dev->data->tx_queues[tx_queue_id];
sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
- sq->nb_sqb_bufs_adj =
- sq->nb_sqb_bufs -
- RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
- sqes_per_sqb;
if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
- sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
- (sqes_per_sqb - 1));
+ sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc / sqes_per_sqb);
txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
- txq->nb_sqb_bufs_adj =
- ((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
}
}
diff --git a/drivers/event/cnxk/cn10k_tx_worker.h b/drivers/event/cnxk/cn10k_tx_worker.h
index c18786a14c..7b2798ad2e 100644
--- a/drivers/event/cnxk/cn10k_tx_worker.h
+++ b/drivers/event/cnxk/cn10k_tx_worker.h
@@ -32,9 +32,9 @@ cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
static __rte_always_inline int32_t
cn10k_sso_sq_depth(const struct cn10k_eth_txq *txq)
{
- return (txq->nb_sqb_bufs_adj -
- __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
- << txq->sqes_per_sqb_log2;
+ int32_t avail = (int32_t)txq->nb_sqb_bufs_adj -
+ (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+ return (avail << txq->sqes_per_sqb_log2) - avail;
}
static __rte_always_inline uint16_t
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index df23219f14..a9d603c22f 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -893,16 +893,9 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
sq = &cnxk_eth_dev->sqs[tx_queue_id];
txq = eth_dev->data->tx_queues[tx_queue_id];
sqes_per_sqb = 1U << txq->sqes_per_sqb_log2;
- sq->nb_sqb_bufs_adj =
- sq->nb_sqb_bufs -
- RTE_ALIGN_MUL_CEIL(sq->nb_sqb_bufs, sqes_per_sqb) /
- sqes_per_sqb;
if (cnxk_eth_dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
- sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
- (sqes_per_sqb - 1));
+ sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc / sqes_per_sqb);
txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
- txq->nb_sqb_bufs_adj =
- ((100 - ROC_NIX_SQB_THRESH) * txq->nb_sqb_bufs_adj) / 100;
}
}
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 988cb3acb6..d15dd309fe 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -711,6 +711,14 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
}
#endif
+static __rte_always_inline int32_t
+cn9k_sso_sq_depth(const struct cn9k_eth_txq *txq)
+{
+ int32_t avail = (int32_t)txq->nb_sqb_bufs_adj -
+ (int32_t)__atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+ return (avail << txq->sqes_per_sqb_log2) - avail;
+}
+
static __rte_always_inline uint16_t
cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
uint64_t *txq_data, const uint32_t flags)
@@ -734,9 +742,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
handle_tx_completion_pkts(txq, 1, 1);
- if (((txq->nb_sqb_bufs_adj -
- __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
- << txq->sqes_per_sqb_log2) <= 0)
+ if (cn9k_sso_sq_depth(txq) <= 0)
return 0;
cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
cn9k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index c9ec01cd9d..bab08a2d3b 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -35,12 +35,13 @@
#define NIX_XMIT_FC_OR_RETURN(txq, pkts) \
do { \
+ int64_t avail; \
/* Cached value is low, Update the fc_cache_pkts */ \
if (unlikely((txq)->fc_cache_pkts < (pkts))) { \
+ avail = txq->nb_sqb_bufs_adj - *txq->fc_mem; \
/* Multiply with sqe_per_sqb to express in pkts */ \
(txq)->fc_cache_pkts = \
- ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) \
- << (txq)->sqes_per_sqb_log2; \
+ (avail << (txq)->sqes_per_sqb_log2) - avail; \
/* Check it again for the room */ \
if (unlikely((txq)->fc_cache_pkts < (pkts))) \
return 0; \
@@ -113,10 +114,9 @@ cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
if (cached < 0) {
/* Check if we have space else retry. */
do {
- refill =
- (txq->nb_sqb_bufs_adj -
- __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
- << txq->sqes_per_sqb_log2;
+ refill = txq->nb_sqb_bufs_adj -
+ __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);
+ refill = (refill << txq->sqes_per_sqb_log2) - refill;
} while (refill <= 0);
__atomic_compare_exchange(&txq->fc_cache_pkts, &cached, &refill,
0, __ATOMIC_RELEASE,
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index e956c1ad2a..8efb75b505 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -32,12 +32,13 @@
#define NIX_XMIT_FC_OR_RETURN(txq, pkts) \
do { \
+ int64_t avail; \
/* Cached value is low, Update the fc_cache_pkts */ \
if (unlikely((txq)->fc_cache_pkts < (pkts))) { \
+ avail = txq->nb_sqb_bufs_adj - *txq->fc_mem; \
/* Multiply with sqe_per_sqb to express in pkts */ \
(txq)->fc_cache_pkts = \
- ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) \
- << (txq)->sqes_per_sqb_log2; \
+ (avail << (txq)->sqes_per_sqb_log2) - avail; \
/* Check it again for the room */ \
if (unlikely((txq)->fc_cache_pkts < (pkts))) \
return 0; \
--
2.39.1
Thread overview: 13+ messages
2023-05-16 14:37 pbhagavatula [this message]
2023-05-16 14:37 ` [PATCH 2/3] event/cnxk: use local labels in asm intrinsic pbhagavatula
2023-05-16 14:37 ` [PATCH 3/3] event/cnxk: use WFE in Tx fc wait pbhagavatula
2023-06-12 15:52 ` [PATCH 1/3] event/cnxk: align TX queue buffer adjustment Jerin Jacob
2023-06-13 9:25 ` [PATCH v2 " pbhagavatula
2023-06-13 9:25 ` [PATCH v2 2/3] event/cnxk: use local labels in asm intrinsic pbhagavatula
2023-06-13 9:25 ` [PATCH v2 3/3] event/cnxk: use WFE in Tx fc wait pbhagavatula
2023-06-14 10:33 ` Jerin Jacob
2023-06-14 18:27 ` Patrick Robb
2023-06-14 20:24 ` [EXT] " Pavan Nikhilesh Bhagavatula
2023-06-15 5:49 ` Jerin Jacob Kollanukkaran
2023-06-15 15:28 ` Stephen Hemminger
2023-06-16 6:46 ` [EXT] " Pavan Nikhilesh Bhagavatula