From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <nelio.laranjeiro@6wind.com>
Received: from mail-wm0-f49.google.com (mail-wm0-f49.google.com [74.125.82.49])
 by dpdk.org (Postfix) with ESMTP id 5C683C726
 for <dev@dpdk.org>; Fri, 24 Jun 2016 15:19:19 +0200 (CEST)
Received: by mail-wm0-f49.google.com with SMTP id v199so22292324wmv.0
 for <dev@dpdk.org>; Fri, 24 Jun 2016 06:19:19 -0700 (PDT)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
 d=6wind-com.20150623.gappssmtp.com; s=20150623;
 h=from:to:cc:subject:date:message-id:in-reply-to:references;
 bh=jWmeK3PXNaN7F2o/fUlUh1k21Fhg29sOmm2TwCpJs/s=;
 b=z+ZHOekR9eGpTwERX706vud5JCTVUOiDpTv8A/xh8gGnWwdVaxNtLNvIGsWFsynNcK
 mj/qkR62jrMk86a6zK1o/2FsNIaIwLf9LWIVMzoW+CGgFkaCos+u4YIl+kNScmEJxzij
 xM95gvR5PSQ3nKoD7D+G8PvIyTeL2DWNwtTI599Pm5kS6/vOCedT17TVYOusabrevkej
 zgG8D8mT6l9g5z5PYbYmGGcon7BKifCsiWE+P98Mtb366ABC/Pz6770QnnHev5AMJO8d
 KmD+33M/LEfvA4yrFLmqXFnb3XXg5luLi0zR3MwVkmOGdUAML8ceVe101D3bFiLtsCu0
 F+IA==
X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
 d=1e100.net; s=20130820;
 h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to
 :references;
 bh=jWmeK3PXNaN7F2o/fUlUh1k21Fhg29sOmm2TwCpJs/s=;
 b=ce2xnlg2+K3LNYWIO3CFjoCt2ASaPeLmBA5n3b72/khJw05P1HtecYmVhDAfC3izKj
 lh8crkHSmDGcthouQj8207oSz/qKd1RzOSxHvUkJN8LHMW8d1Uzd06MuE5miUfdS/m4k
 UPxzcmtSW+nfy9FbCFfT7pwng/ErvQTgb+MbML3km3u/vH1z8BCMIiIq81WWkUzwIeCU
 N7z7rdvALPV3lKfs4Hnt7NXycAyO782FLKWEgpCqlg5P5X4H6TbOF36THmjoV7Gu2GBE
 wHbuCzUhK7F7y4ZbT0M9SW4nTy7BLzR49oKiRHoZgcnDWRypHUq9Y6Ao6cAMHCuBn0dz
 9WkQ==
X-Gm-Message-State: ALyK8tJGc8VgR7hueu5WEJupjhLhwyLQoaG97Yf7CpwU9C59jY04G+vMRx1+y0Ij4BIlcTz1
X-Received: by 10.28.18.199 with SMTP id 190mr6206025wms.66.1466774358939;
 Fri, 24 Jun 2016 06:19:18 -0700 (PDT)
Received: from ping.vm.6wind.com (guy78-3-82-239-227-177.fbx.proxad.net.
 [82.239.227.177])
 by smtp.gmail.com with ESMTPSA id m125sm1279533wmm.8.2016.06.24.06.19.17
 (version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);
 Fri, 24 Jun 2016 06:19:18 -0700 (PDT)
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
 Ferruh Yigit <ferruh.yigit@intel.com>,
 Adrien Mazarguil <adrien.mazarguil@6wind.com>
Date: Fri, 24 Jun 2016 15:17:59 +0200
Message-Id: <1466774284-20932-21-git-send-email-nelio.laranjeiro@6wind.com>
X-Mailer: git-send-email 2.1.4
In-Reply-To: <1466774284-20932-1-git-send-email-nelio.laranjeiro@6wind.com>
References: <1466758261-25986-1-git-send-email-nelio.laranjeiro@6wind.com>
 <1466774284-20932-1-git-send-email-nelio.laranjeiro@6wind.com>
Subject: [dpdk-dev] [PATCH v7 20/25] mlx5: check remaining space while
	processing Tx burst
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches and discussions about DPDK <dev.dpdk.org>
List-Unsubscribe: <http://dpdk.org/ml/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://dpdk.org/ml/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <http://dpdk.org/ml/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Fri, 24 Jun 2016 13:19:19 -0000

From: Adrien Mazarguil <adrien.mazarguil@6wind.com>

The space necessary to store segmented packets cannot be known in advance,
so the remaining ring space must be verified for each packet individually.

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 144 +++++++++++++++++++++++--------------------
 1 file changed, 78 insertions(+), 66 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index ed2b5fe..fadc182 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -585,50 +585,51 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	struct txq *txq = (struct txq *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = txq->elts_n;
-	unsigned int i;
+	unsigned int i = 0;
 	unsigned int max;
 	unsigned int comp;
 	volatile union mlx5_wqe *wqe;
-	struct rte_mbuf *buf;
 
 	if (unlikely(!pkts_n))
 		return 0;
-	buf = pkts[0];
 	/* Prefetch first packet cacheline. */
 	tx_prefetch_cqe(txq, txq->cq_ci);
 	tx_prefetch_cqe(txq, txq->cq_ci + 1);
-	rte_prefetch0(buf);
+	rte_prefetch0(*pkts);
 	/* Start processing. */
 	txq_complete(txq);
 	max = (elts_n - (elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
-	assert(max >= 1);
-	assert(max <= elts_n);
-	/* Always leave one free entry in the ring. */
-	--max;
-	if (max == 0)
-		return 0;
-	if (max > pkts_n)
-		max = pkts_n;
-	for (i = 0; (i != max); ++i) {
-		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+	do {
+		struct rte_mbuf *buf;
+		unsigned int elts_head_next;
 		uintptr_t addr;
 		uint32_t length;
 		uint32_t lkey;
 
+		/*
+		 * Make sure there is enough room to store this packet and
+		 * that one ring entry remains unused.
+		 */
+		if (max < 1 + 1)
+			break;
+		--max;
+		--pkts_n;
+		buf = *(pkts++);
+		elts_head_next = (elts_head + 1) & (elts_n - 1);
 		wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
 		rte_prefetch0(wqe);
-		if (i + 1 < max)
-			rte_prefetch0(pkts[i + 1]);
+		if (pkts_n)
+			rte_prefetch0(*pkts);
 		/* Retrieve buffer information. */
 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
 		length = DATA_LEN(buf);
 		/* Update element. */
 		(*txq->elts)[elts_head] = buf;
 		/* Prefetch next buffer data. */
-		if (i + 1 < max)
-			rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1],
+		if (pkts_n)
+			rte_prefetch0(rte_pktmbuf_mtod(*pkts,
 						       volatile void *));
 		/* Retrieve Memory Region key for this memory pool. */
 		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
@@ -652,8 +653,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		txq->stats.obytes += length;
 #endif
 		elts_head = elts_head_next;
-		buf = pkts[i + 1];
-	}
+		++i;
+	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
@@ -697,44 +698,45 @@ mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	struct txq *txq = (struct txq *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = txq->elts_n;
-	unsigned int i;
+	unsigned int i = 0;
 	unsigned int max;
 	unsigned int comp;
 	volatile union mlx5_wqe *wqe;
-	struct rte_mbuf *buf;
 	unsigned int max_inline = txq->max_inline;
 
 	if (unlikely(!pkts_n))
 		return 0;
-	buf = pkts[0];
 	/* Prefetch first packet cacheline. */
 	tx_prefetch_cqe(txq, txq->cq_ci);
 	tx_prefetch_cqe(txq, txq->cq_ci + 1);
-	rte_prefetch0(buf);
+	rte_prefetch0(*pkts);
 	/* Start processing. */
 	txq_complete(txq);
 	max = (elts_n - (elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
-	assert(max >= 1);
-	assert(max <= elts_n);
-	/* Always leave one free entry in the ring. */
-	--max;
-	if (max == 0)
-		return 0;
-	if (max > pkts_n)
-		max = pkts_n;
-	for (i = 0; (i != max); ++i) {
-		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+	do {
+		struct rte_mbuf *buf;
+		unsigned int elts_head_next;
 		uintptr_t addr;
 		uint32_t length;
 		uint32_t lkey;
 
+		/*
+		 * Make sure there is enough room to store this packet and
+		 * that one ring entry remains unused.
+		 */
+		if (max < 1 + 1)
+			break;
+		--max;
+		--pkts_n;
+		buf = *(pkts++);
+		elts_head_next = (elts_head + 1) & (elts_n - 1);
 		wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
 		tx_prefetch_wqe(txq, txq->wqe_ci);
 		tx_prefetch_wqe(txq, txq->wqe_ci + 1);
-		if (i + 1 < max)
-			rte_prefetch0(pkts[i + 1]);
+		if (pkts_n)
+			rte_prefetch0(*pkts);
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
@@ -750,8 +752,8 @@ mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Update element. */
 		(*txq->elts)[elts_head] = buf;
 		/* Prefetch next buffer data. */
-		if (i + 1 < max)
-			rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1],
+		if (pkts_n)
+			rte_prefetch0(rte_pktmbuf_mtod(*pkts,
 						       volatile void *));
 		if (length <= max_inline) {
 			if (buf->ol_flags & PKT_TX_VLAN_PKT)
@@ -771,12 +773,12 @@ mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		wqe->inl.ctrl.data[2] = 0;
 		elts_head = elts_head_next;
-		buf = pkts[i + 1];
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += length;
 #endif
-	}
+		++i;
+	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
@@ -887,13 +889,15 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	struct txq *txq = (struct txq *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = txq->elts_n;
-	unsigned int i;
+	unsigned int i = 0;
 	unsigned int max;
 	unsigned int comp;
 	struct mlx5_mpw mpw = {
 		.state = MLX5_MPW_STATE_CLOSED,
 	};
 
+	if (unlikely(!pkts_n))
+		return 0;
 	/* Prefetch first packet cacheline. */
 	tx_prefetch_cqe(txq, txq->cq_ci);
 	tx_prefetch_wqe(txq, txq->wqe_ci);
@@ -903,22 +907,24 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	max = (elts_n - (elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
-	assert(max >= 1);
-	assert(max <= elts_n);
-	/* Always leave one free entry in the ring. */
-	--max;
-	if (max == 0)
-		return 0;
-	if (max > pkts_n)
-		max = pkts_n;
-	for (i = 0; (i != max); ++i) {
-		struct rte_mbuf *buf = pkts[i];
+	do {
+		struct rte_mbuf *buf;
 		volatile struct mlx5_wqe_data_seg *dseg;
-		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+		unsigned int elts_head_next;
 		uintptr_t addr;
 		uint32_t length;
 		uint32_t cs_flags = 0;
 
+		/*
+		 * Make sure there is enough room to store this packet and
+		 * that one ring entry remains unused.
+		 */
+		if (max < 1 + 1)
+			break;
+		--max;
+		--pkts_n;
+		buf = *(pkts++);
+		elts_head_next = (elts_head + 1) & (elts_n - 1);
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
@@ -951,7 +957,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += length;
 #endif
-	}
+		++i;
+	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
@@ -1059,7 +1066,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 	struct txq *txq = (struct txq *)dpdk_txq;
 	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = txq->elts_n;
-	unsigned int i;
+	unsigned int i = 0;
 	unsigned int max;
 	unsigned int comp;
 	unsigned int inline_room = txq->max_inline;
@@ -1067,6 +1074,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		.state = MLX5_MPW_STATE_CLOSED,
 	};
 
+	if (unlikely(!pkts_n))
+		return 0;
 	/* Prefetch first packet cacheline. */
 	tx_prefetch_cqe(txq, txq->cq_ci);
 	tx_prefetch_wqe(txq, txq->wqe_ci);
@@ -1076,21 +1085,23 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 	max = (elts_n - (elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
-	assert(max >= 1);
-	assert(max <= elts_n);
-	/* Always leave one free entry in the ring. */
-	--max;
-	if (max == 0)
-		return 0;
-	if (max > pkts_n)
-		max = pkts_n;
-	for (i = 0; (i != max); ++i) {
-		struct rte_mbuf *buf = pkts[i];
-		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+	do {
+		struct rte_mbuf *buf;
+		unsigned int elts_head_next;
 		uintptr_t addr;
 		uint32_t length;
 		uint32_t cs_flags = 0;
 
+		/*
+		 * Make sure there is enough room to store this packet and
+		 * that one ring entry remains unused.
+		 */
+		if (max < 1 + 1)
+			break;
+		--max;
+		--pkts_n;
+		buf = *(pkts++);
+		elts_head_next = (elts_head + 1) & (elts_n - 1);
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
@@ -1177,7 +1188,8 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += length;
 #endif
-	}
+		++i;
+	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
-- 
2.1.4