From: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, orika@mellanox.com
Subject: [dpdk-dev] [PATCH v2 1/4] net/mlx5: move Tx complete request routine
Date: Thu, 9 Jan 2020 10:56:17 +0000 [thread overview]
Message-ID: <1578567380-26994-2-git-send-email-viacheslavo@mellanox.com> (raw)
In-Reply-To: <1578567380-26994-1-git-send-email-viacheslavo@mellanox.com>
The completion request flag is now set once per Tx burst call, and the code
of the corresponding routine is moved to the end of the sending loop. This is
a preparation step for removing the usage of the WQE reserved field to store
the index of elts to free.
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
drivers/net/mlx5/mlx5_rxtx.c | 26 ++++----------------------
1 file changed, 4 insertions(+), 22 deletions(-)
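Editor's note (not part of the patch): a minimal sketch of the resulting
burst-routine tail under the simplified control flow below. The helpers
build_all_wqes() and ring_doorbell(), and the function name
mlx5_tx_burst_sketch(), are hypothetical placeholders standing in for the
existing sending loop and doorbell code; mlx5_tx_request_completion(), txq,
loc, olx and the pkts_sent/pkts_loop fields are taken from the diff itself.

    static uint16_t
    mlx5_tx_burst_sketch(struct mlx5_txq_data *restrict txq,
                         struct rte_mbuf **restrict pkts,
                         uint16_t pkts_n, unsigned int olx)
    {
            struct mlx5_txq_local loc;

            /* Build WQEs for the whole burst; no CQE requests inside. */
            build_all_wqes(txq, &loc, pkts, pkts_n, olx);
            /* Take a shortcut if nothing is sent. */
            if (unlikely(loc.pkts_sent == loc.pkts_loop))
                    goto burst_exit;
            /* Request CQE generation once per burst, if limits are reached. */
            mlx5_tx_request_completion(txq, &loc, olx);
            /* Ring the QP doorbell immediately after WQE building completion. */
            ring_doorbell(txq);
    burst_exit:
            return (uint16_t)loc.pkts_sent;
    }

With this structure the threshold check inside mlx5_tx_request_completion()
(elts_head minus elts_comp against MLX5_TX_COMP_THRESH, see the first hunk)
is evaluated once per burst instead of once per built WQE or packet.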
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 25a2952..ee6d5fc 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -2145,9 +2145,6 @@ enum mlx5_txcmp_code {
* Pointer to TX queue structure.
* @param loc
* Pointer to burst routine local context.
- * @param multi,
- * Routine is called from multi-segment sending loop,
- * do not correct the elts_head according to the pkts_copy.
* @param olx
* Configured Tx offloads mask. It is fully defined at
* compile time and may be used for optimization.
@@ -2155,13 +2152,12 @@ enum mlx5_txcmp_code {
static __rte_always_inline void
mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
struct mlx5_txq_local *restrict loc,
- bool multi,
unsigned int olx)
{
uint16_t head = txq->elts_head;
unsigned int part;
- part = (MLX5_TXOFF_CONFIG(INLINE) || multi) ?
+ part = MLX5_TXOFF_CONFIG(INLINE) ?
0 : loc->pkts_sent - loc->pkts_copy;
head += part;
if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
@@ -3120,8 +3116,6 @@ enum mlx5_txcmp_code {
wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, true, olx);
return MLX5_TXCMP_CODE_MULTI;
}
@@ -3230,8 +3224,6 @@ enum mlx5_txcmp_code {
} while (true);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, true, olx);
return MLX5_TXCMP_CODE_MULTI;
}
@@ -3388,8 +3380,6 @@ enum mlx5_txcmp_code {
wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, true, olx);
return MLX5_TXCMP_CODE_MULTI;
}
@@ -3599,8 +3589,6 @@ enum mlx5_txcmp_code {
--loc->elts_free;
++loc->pkts_sent;
--pkts_n;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, false, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
@@ -3750,7 +3738,7 @@ enum mlx5_txcmp_code {
struct mlx5_txq_local *restrict loc,
unsigned int ds,
unsigned int slen,
- unsigned int olx)
+ unsigned int olx __rte_unused)
{
assert(!MLX5_TXOFF_CONFIG(INLINE));
#ifdef MLX5_PMD_SOFT_COUNTERS
@@ -3765,8 +3753,6 @@ enum mlx5_txcmp_code {
loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, false, olx);
}
/*
@@ -3809,8 +3795,6 @@ enum mlx5_txcmp_code {
loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
txq->wqe_ci += (len + 3) / 4;
loc->wqe_free -= (len + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, false, olx);
}
/**
@@ -4011,8 +3995,6 @@ enum mlx5_txcmp_code {
txq->wqe_ci += (2 + part + 3) / 4;
loc->wqe_free -= (2 + part + 3) / 4;
pkts_n -= part;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, false, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
@@ -4496,8 +4478,6 @@ enum mlx5_txcmp_code {
}
++loc->pkts_sent;
--pkts_n;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, false, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
@@ -4776,6 +4756,8 @@ enum mlx5_txcmp_code {
/* Take a shortcut if nothing is sent. */
if (unlikely(loc.pkts_sent == loc.pkts_loop))
goto burst_exit;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, &loc, olx);
/*
* Ring QP doorbell immediately after WQE building completion
* to improve latencies. The pure software related data treatment
--
1.8.3.1
Thread overview: 31+ messages
2020-01-08 16:15 [dpdk-dev] [PATCH 0/4] net/mlx5: remove Tx descriptor reserved field usage Viacheslav Ovsiienko
2020-01-08 16:15 ` [dpdk-dev] [PATCH 1/4] net/mlx5: move Tx complete request routine Viacheslav Ovsiienko
2020-01-08 16:15 ` [dpdk-dev] [PATCH 2/4] net/mlx5: update Tx error handling routine Viacheslav Ovsiienko
2020-01-08 16:16 ` [dpdk-dev] [PATCH 3/4] net/mlx5: add free on completion queue Viacheslav Ovsiienko
2020-01-08 16:16 ` [dpdk-dev] [PATCH 4/4] net/mlx5: engage " Viacheslav Ovsiienko
2020-01-09 10:56 ` [dpdk-dev] [PATCH v2 0/4] net/mlx5: remove Tx descriptor reserved field usage Viacheslav Ovsiienko
2020-01-09 10:56 ` Viacheslav Ovsiienko [this message]
2020-01-09 10:56 ` [dpdk-dev] [PATCH v2 2/4] net/mlx5: update Tx error handling routine Viacheslav Ovsiienko
2020-01-09 10:56 ` [dpdk-dev] [PATCH v2 3/4] net/mlx5: add free on completion queue Viacheslav Ovsiienko
2020-01-09 15:12 ` Ferruh Yigit
2020-01-09 15:22 ` Slava Ovsiienko
2020-01-09 10:56 ` [dpdk-dev] [PATCH v2 4/4] net/mlx5: engage " Viacheslav Ovsiienko
2020-01-09 15:18 ` Ferruh Yigit
2020-01-09 15:27 ` Slava Ovsiienko
2020-01-09 15:43 ` Ferruh Yigit
2020-01-09 16:22 ` Slava Ovsiienko
2020-01-10 9:06 ` Thomas Monjalon
2020-01-10 9:28 ` Slava Ovsiienko
2020-01-10 9:34 ` Thomas Monjalon
2020-01-10 9:55 ` Slava Ovsiienko
2020-01-10 13:11 ` Thomas Monjalon
2020-01-10 13:42 ` Slava Ovsiienko
2020-01-17 10:44 ` Slava Ovsiienko
2020-01-09 14:22 ` [dpdk-dev] [PATCH v2 0/4] net/mlx5: remove Tx descriptor reserved field usage Raslan Darawsheh
2020-01-13 9:35 ` Raslan Darawsheh
2020-01-09 17:16 ` [dpdk-dev] [PATCH v3 " Viacheslav Ovsiienko
2020-01-09 17:16 ` [dpdk-dev] [PATCH v3 1/4] net/mlx5: move Tx complete request routine Viacheslav Ovsiienko
2020-01-09 17:16 ` [dpdk-dev] [PATCH v3 2/4] net/mlx5: update Tx error handling routine Viacheslav Ovsiienko
2020-01-09 17:16 ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: add free on completion queue Viacheslav Ovsiienko
2020-01-09 17:16 ` [dpdk-dev] [PATCH v3 4/4] net/mlx5: engage " Viacheslav Ovsiienko
2020-01-13 9:36 ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: remove Tx descriptor reserved field usage Raslan Darawsheh