patches for DPDK stable branches
From: Yongseok Koh <yskoh@mellanox.com>
To: Yongseok Koh <yskoh@mellanox.com>
Cc: Xueming Li <xuemingl@mellanox.com>, dpdk stable <stable@dpdk.org>
Subject: [dpdk-stable] patch 'net/mlx5: fix assert for Tx completion queue count' has been queued to LTS release 17.11.4
Date: Thu, 26 Jul 2018 19:32:05 -0700	[thread overview]
Message-ID: <20180727023205.38767-10-yskoh@mellanox.com> (raw)
In-Reply-To: <20180727023205.38767-1-yskoh@mellanox.com>

Hi,

FYI, your patch has been queued to LTS release 17.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 07/28/18, so please
shout if you have any objections.

Thanks.

Yongseok

---
From 3bf985f340ab3ebe9318331cbd42ed05e08781ab Mon Sep 17 00:00:00 2001
From: Yongseok Koh <yskoh@mellanox.com>
Date: Mon, 23 Jul 2018 11:00:10 -0700
Subject: [PATCH] net/mlx5: fix assert for Tx completion queue count

[ upstream commit c618e7e82bfed1da91f1dbabdd06e0a39fdb7b78 ]

There should be at least one Tx CQE remaining as long as the Tx WQ and
txq->elts[] have slots available to send a packet, because the size of the
Tx CQ is calculated exactly from the sizes of the other resources. Since
this is guaranteed, it is verified by an assertion.

However, max_elts is checked only after the assertion on the Tx CQ. If no
slot is available in txq->elts[], the assertion can fire even though nothing
is wrong, so it must be made only at the point where a completion is
actually requested.
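
To make the assertion easier to read, here is a minimal standalone sketch of
the slot check (a model under simplifying assumptions: the struct and function
names are illustrative only, just cqe_n/cq_pi/cq_ci mirror the real txq
fields, and the explicit uint16_t cast merely makes the wrap-around visible):

#include <assert.h>
#include <stdint.h>

struct cq_model {
	uint16_t cqe_n; /* log2 of the number of CQEs in the Tx CQ */
	uint16_t cq_pi; /* producer index: completions requested so far */
	uint16_t cq_ci; /* consumer index: completions already processed */
};

static inline void
request_completion(struct cq_model *cq)
{
	/*
	 * cq_pi - cq_ci is the number of completions still outstanding; the
	 * 16-bit unsigned wrap-around keeps the subtraction correct after
	 * the counters overflow. The full expression is non-zero exactly
	 * when at least one CQE slot is still free.
	 *
	 * Incrementing cq_pi inside assert() is deliberate: as in the patch
	 * below, the counter is only maintained for this sanity check, so
	 * both the check and the increment compile out under NDEBUG.
	 */
	assert((1u << cq->cqe_n) - (uint16_t)(cq->cq_pi++ - cq->cq_ci));
}

int
main(void)
{
	struct cq_model cq = { .cqe_n = 6, .cq_pi = 0, .cq_ci = 0 };

	/* 2^6 = 64 CQE slots, none outstanding: the assertion must hold. */
	request_completion(&cq);
	return 0;
}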

Fixes: 2eefbec531c7 ("net/mlx5: add missing sanity checks for Tx completion queue")
Fixes: 6ce84bd88919 ("net/mlx5: add enhanced multi-packet send for ConnectX-5")

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Xueming Li <xuemingl@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxtx.c          | 28 ++++++++--------------------
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 14 ++++----------
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  | 14 ++++----------
 3 files changed, 16 insertions(+), 40 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 38f036c4b..1bbce3b75 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -401,8 +401,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Start processing. */
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
-	/* A CQE slot must always be available. */
-	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	if (unlikely(!max_wqe))
 		return 0;
@@ -742,14 +740,13 @@ next_wqe:
 	/* Check whether completion threshold has been reached. */
 	comp = txq->elts_comp + i + j + k;
 	if (comp >= MLX5_TX_COMP_THRESH) {
+		/* A CQE slot must always be available. */
+		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 		/* Request completion on last WQE. */
 		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		last_wqe->ctrl3 = txq->elts_head;
 		txq->elts_comp = 0;
-#ifndef NDEBUG
-		++txq->cq_pi;
-#endif
 	} else {
 		txq->elts_comp = comp;
 	}
@@ -868,8 +865,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Start processing. */
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
-	/* A CQE slot must always be available. */
-	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	if (unlikely(!max_wqe))
 		return 0;
@@ -958,14 +953,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	if (comp >= MLX5_TX_COMP_THRESH) {
 		volatile struct mlx5_wqe *wqe = mpw.wqe;
 
+		/* A CQE slot must always be available. */
+		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 		/* Request completion on last WQE. */
 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
-#ifndef NDEBUG
-		++txq->cq_pi;
-#endif
 	} else {
 		txq->elts_comp = comp;
 	}
@@ -1097,8 +1091,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 	/* Start processing. */
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
-	/* A CQE slot must always be available. */
-	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	do {
 		struct rte_mbuf *buf = *(pkts++);
 		uintptr_t addr;
@@ -1255,14 +1247,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 	if (comp >= MLX5_TX_COMP_THRESH) {
 		volatile struct mlx5_wqe *wqe = mpw.wqe;
 
+		/* A CQE slot must always be available. */
+		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 		/* Request completion on last WQE. */
 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
-#ifndef NDEBUG
-		++txq->cq_pi;
-#endif
 	} else {
 		txq->elts_comp = comp;
 	}
@@ -1385,8 +1376,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Start processing. */
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
-	/* A CQE slot must always be available. */
-	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	if (unlikely(!max_wqe))
 		return 0;
@@ -1599,15 +1588,14 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			 (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
 		volatile struct mlx5_wqe *wqe = mpw.wqe;
 
+		/* A CQE slot must always be available. */
+		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 		/* Request completion on last WQE. */
 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
 		txq->mpw_comp = txq->wqe_ci;
-#ifndef NDEBUG
-		++txq->cq_pi;
-#endif
 	} else {
 		txq->elts_comp += j;
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 32d1e6de9..e748615ee 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -135,8 +135,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
-	/* A CQE slot must always be available. */
-	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	if (unlikely(!pkts_n))
 		return 0;
 	for (n = 0; n < pkts_n; ++n) {
@@ -204,12 +202,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
 	txq->elts_head = elts_head;
 	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+		/* A CQE slot must always be available. */
+		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		wqe->ctrl[3] = txq->elts_head;
 		txq->elts_comp = 0;
-#ifndef NDEBUG
-		++txq->cq_pi;
-#endif
 	}
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	txq->stats.opackets += n;
@@ -273,8 +270,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
-	/* A CQE slot must always be available. */
-	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
 	if (unlikely(!pkts_n))
@@ -310,11 +305,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
 		txq->elts_comp += pkts_n;
 	} else {
+		/* A CQE slot must always be available. */
+		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 		/* Request a completion. */
 		txq->elts_comp = 0;
-#ifndef NDEBUG
-		++txq->cq_pi;
-#endif
 		comp_req = 8;
 	}
 	/* Fill CTRL in the header. */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 1a56fca9f..7e8c9b885 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -135,8 +135,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
-	/* A CQE slot must always be available. */
-	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	if (unlikely(!pkts_n))
 		return 0;
 	for (n = 0; n < pkts_n; ++n) {
@@ -205,12 +203,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
 	txq->elts_head = elts_head;
 	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+		/* A CQE slot must always be available. */
+		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		wqe->ctrl[3] = txq->elts_head;
 		txq->elts_comp = 0;
-#ifndef NDEBUG
-		++txq->cq_pi;
-#endif
 	}
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	txq->stats.opackets += n;
@@ -272,8 +269,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
-	/* A CQE slot must always be available. */
-	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
 	assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
@@ -311,11 +306,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
 		txq->elts_comp += pkts_n;
 	} else {
+		/* A CQE slot must always be available. */
+		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
 		/* Request a completion. */
 		txq->elts_comp = 0;
-#ifndef NDEBUG
-		++txq->cq_pi;
-#endif
 		comp_req = 8;
 	}
 	/* Fill CTRL in the header. */
-- 
2.11.0


Thread overview: 10+ messages
2018-07-27  2:31 [dpdk-stable] patch 'metrics: add check for invalid key' " Yongseok Koh
2018-07-27  2:31 ` [dpdk-stable] patch 'metrics: do not fail silently when uninitialised' " Yongseok Koh
2018-07-27  2:31 ` [dpdk-stable] patch 'metrics: disallow null as metric name' " Yongseok Koh
2018-07-27  2:31 ` [dpdk-stable] patch 'test: fix EAL flags autotest on FreeBSD' " Yongseok Koh
2018-07-27  2:32 ` [dpdk-stable] patch 'test/flow_classify: fix return types' " Yongseok Koh
2018-07-27  2:32 ` [dpdk-stable] patch 'test/hash: fix multiwriter with non consecutive cores' " Yongseok Koh
2018-07-27  2:32 ` [dpdk-stable] patch 'test/hash: fix potential memory leak' " Yongseok Koh
2018-07-27  2:32 ` [dpdk-stable] patch 'doc: fix bonding command in testpmd' " Yongseok Koh
2018-07-27  2:32 ` [dpdk-stable] patch 'net/mlx5: add missing sanity checks for Tx completion queue' " Yongseok Koh
2018-07-27  2:32 ` [dpdk-stable] patch 'net/mlx5: fix assert for Tx completion queue count' " Yongseok Koh [this message]
