DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline
@ 2017-01-08 15:41 Elad Persiko
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 2/5] net/mlx5: remove unnecessary goto label Elad Persiko
                   ` (4 more replies)
  0 siblings, 5 replies; 8+ messages in thread
From: Elad Persiko @ 2017-01-08 15:41 UTC (permalink / raw)
  To: dev; +Cc: Elad Persiko

Prior to this patch, when sending a packet and the following
conditions were reached:
	1. last work queue element is used.
	2. inline was requested by the user
	3. no room for inline packet.
then the inline request was ignored and the packet was sent
by pointer completely.

This patch handles this scenario. In this case the last
work queue element is turned to be a null work queue element and
the packet is being sent after the wrap around.

Signed-off-by: Elad Persiko <eladpe@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 12 ++++++++++++
 drivers/net/mlx5/mlx5_txq.c  |  8 ++++++--
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index e0ee2f2..be38aed 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -481,6 +481,17 @@
 				pkt_inline_sz += copy_b;
 				/* Sanity check. */
 				assert(addr <= addr_end);
+			} else {
+				wqe->ctrl = (rte_v128u32_t){
+					htonl(txq->wqe_ci << 8),
+					htonl(txq->qp_num_8s | 1),
+					0,
+					0,
+					};
+				length = 0;
+				buf = *(pkts--);
+				ds = 1;
+				goto next_pkt_part;
 			}
 			/*
 			 * 2 DWORDs consumed by the WQE header + ETH segment +
@@ -577,6 +588,7 @@
 			0,
 			0,
 		};
+next_pkt_part:
 		wqe->eseg = (rte_v128u32_t){
 			0,
 			cs_flags,
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 949035b..951e50a 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -314,8 +314,12 @@
 		/* CQ to be associated with the receive queue. */
 		.recv_cq = tmpl.cq,
 		.cap = {
-			/* Max number of outstanding WRs. */
-			.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
+			/*
+			 * Max number of outstanding WRs.
+			 * "+1" for null WQE place holder.
+			 */
+			.max_send_wr = ((priv->device_attr.max_qp_wr <
+					(desc + 1)) ?
 					priv->device_attr.max_qp_wr :
 					desc),
 			/*
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH 2/5] net/mlx5: remove unnecessary goto label
  2017-01-08 15:41 [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Elad Persiko
@ 2017-01-08 15:42 ` Elad Persiko
  2017-01-09 12:29   ` Ferruh Yigit
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 3/5] net/mlx5: support TSO in control plane Elad Persiko
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 8+ messages in thread
From: Elad Persiko @ 2017-01-08 15:42 UTC (permalink / raw)
  To: dev; +Cc: Elad Persiko

The use_dseg label can be deleted because the code it marks is now reached without a goto.

Signed-off-by: Elad Persiko <eladpe@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index be38aed..1560530 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -505,7 +505,6 @@
 				if ((uintptr_t)dseg >= end)
 					dseg = (volatile rte_v128u32_t *)
 					       txq->wqes;
-				goto use_dseg;
 			} else if (!segs_n) {
 				goto next_pkt;
 			} else {
@@ -523,19 +522,18 @@
 			dseg = (volatile rte_v128u32_t *)
 				((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
 			ds = 3;
-use_dseg:
-			/* Add the remaining packet as a simple ds. */
-			addr = htonll(addr);
-			*dseg = (rte_v128u32_t){
-				htonl(length),
-				txq_mp2mr(txq, txq_mb2mp(buf)),
-				addr,
-				addr >> 32,
-			};
-			++ds;
-			if (!segs_n)
-				goto next_pkt;
 		}
+		/* Add the remaining packet as a simple ds. */
+		addr = htonll(addr);
+		*dseg = (rte_v128u32_t){
+			htonl(length),
+			txq_mp2mr(txq, txq_mb2mp(buf)),
+			addr,
+			addr >> 32,
+		};
+		++ds;
+		if (!segs_n)
+			goto next_pkt;
 next_seg:
 		assert(buf);
 		assert(ds);
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH 3/5] net/mlx5: support TSO in control plane
  2017-01-08 15:41 [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Elad Persiko
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 2/5] net/mlx5: remove unnecessary goto label Elad Persiko
@ 2017-01-08 15:42 ` Elad Persiko
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 4/5] net/mlx5: implement TSO data path Elad Persiko
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Elad Persiko @ 2017-01-08 15:42 UTC (permalink / raw)
  To: dev; +Cc: Elad Persiko

Signed-off-by: Elad Persiko <eladpe@mellanox.com>
---
 doc/guides/nics/mlx5.rst    |  6 ++++++
 drivers/net/mlx5/mlx5.c     | 17 ++++++++++++++++-
 drivers/net/mlx5/mlx5.h     |  1 +
 drivers/net/mlx5/mlx5_txq.c |  4 +++-
 4 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index a41c432..816075a 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -188,6 +188,12 @@ Run-time configuration
   It is currently only supported on the ConnectX-4 Lx and ConnectX-5
   families of adapters. Enabled by default.
 
+- ``txq_lso_en`` parameter [int]
+
+  A nonzero value enables TCP Segmentation Offloading (in hardware) on tx
+  side. It saves CPU time and PCI bandwidth.
+
+  Enabled by default.
 Prerequisites
 -------------
 
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 6293c1f..55c5b87 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -84,6 +84,9 @@
 /* Device parameter to enable multi-packet send WQEs. */
 #define MLX5_TXQ_MPW_EN "txq_mpw_en"
 
+/* Device parameter to enable LSO. */
+#define MLX5_TXQ_LSO_EN "txq_lso_en"
+
 /**
  * Retrieve integer value from environment variable.
  *
@@ -287,6 +290,8 @@
 		priv->txqs_inline = tmp;
 	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
 		priv->mps &= !!tmp; /* Enable MPW only if HW supports */
+	} else if (strcmp(MLX5_TXQ_LSO_EN, key) == 0) {
+		priv->lso &= !!tmp;
 	} else {
 		WARN("%s: unknown parameter", key);
 		return -EINVAL;
@@ -312,6 +317,7 @@
 		MLX5_RXQ_CQE_COMP_EN,
 		MLX5_TXQ_INLINE,
 		MLX5_TXQS_MIN_INLINE,
+		MLX5_TXQ_LSO_EN,
 		MLX5_TXQ_MPW_EN,
 		NULL,
 	};
@@ -429,7 +435,7 @@
 			mps = 0;
 		}
 		INFO("PCI information matches, using device \"%s\""
-		     " (SR-IOV: %s, MPS: %s)",
+		     " (SR-IOV: %s, LSO: true, MPS: %s)",
 		     list[i]->name,
 		     sriov ? "true" : "false",
 		     mps ? "true" : "false");
@@ -474,8 +480,11 @@
 			IBV_EXP_DEVICE_ATTR_RX_HASH |
 			IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
 			IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
+			IBV_EXP_DEVICE_ATTR_TSO_CAPS |
 			0;
 
+		exp_device_attr.tso_caps.max_tso = 262144;
+		exp_device_attr.tso_caps.supported_qpts =  IBV_QPT_RAW_ETH;
 		DEBUG("using port %u (%08" PRIx32 ")", port, test);
 
 		ctx = ibv_open_device(ibv_dev);
@@ -525,6 +534,7 @@
 		priv->port = port;
 		priv->pd = pd;
 		priv->mtu = ETHER_MTU;
+		priv->lso = 1; /* Enabled by default. */
 		priv->mps = mps; /* Enable MPW by default if supported. */
 		priv->cqe_comp = 1; /* Enable compression by default. */
 		err = mlx5_args(priv, pci_dev->device.devargs);
@@ -580,6 +590,11 @@
 			err = ENOTSUP;
 			goto port_error;
 		}
+		if (priv->lso && priv->mps) {
+			ERROR("LSO and MPS can't coexists");
+			err = ENOTSUP;
+			goto port_error;
+		}
 		/* Allocate and register default RSS hash keys. */
 		priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
 					    sizeof((*priv->rss_conf)[0]), 0);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index ee62e04..a163983 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -116,6 +116,7 @@ struct priv {
 	unsigned int hw_padding:1; /* End alignment padding is supported. */
 	unsigned int sriov:1; /* This is a VF or PF with VF devices. */
 	unsigned int mps:1; /* Whether multi-packet send is supported. */
+	unsigned int lso:1; /* Whether lso is supported. */
 	unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
 	unsigned int pending_alarm:1; /* An alarm is pending. */
 	unsigned int txq_inline; /* Maximum packet size for inlining. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 951e50a..de9f494 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -337,8 +337,10 @@
 		.sq_sig_all = 0,
 		.pd = priv->pd,
 		.res_domain = tmpl.rd,
+		.max_tso_header = 128,  // ETH/IPv4/TCP header example
 		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
-			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
+			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN |
+			      IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER),
 	};
 	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
 		tmpl.txq.max_inline =
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH 4/5] net/mlx5: implement TSO data path
  2017-01-08 15:41 [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Elad Persiko
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 2/5] net/mlx5: remove unnecessary goto label Elad Persiko
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 3/5] net/mlx5: support TSO in control plane Elad Persiko
@ 2017-01-08 15:42 ` Elad Persiko
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 5/5] doc: add tso capabilities feature for mlx5 Elad Persiko
  2017-01-23 12:27 ` [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Ferruh Yigit
  4 siblings, 0 replies; 8+ messages in thread
From: Elad Persiko @ 2017-01-08 15:42 UTC (permalink / raw)
  To: dev; +Cc: Elad Persiko

Signed-off-by: Elad Persiko <eladpe@mellanox.com>
---
 drivers/net/mlx5/mlx5_ethdev.c |   2 +
 drivers/net/mlx5/mlx5_rxtx.c   | 246 +++++++++++++++++++++++++++++++----------
 2 files changed, 187 insertions(+), 61 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index fbb1b65..ea5ab02 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -589,6 +589,8 @@ struct priv *
 		(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0);
 	if (!priv->mps)
 		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+	if (priv->lso)
+		info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
 	if (priv->hw_csum)
 		info->tx_offload_capa |=
 			(DEV_TX_OFFLOAD_IPV4_CKSUM |
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 1560530..4940dc1 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -388,6 +388,8 @@
 		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE;
 		uint16_t ehdr;
 		uint8_t cs_flags = 0;
+		uint8_t header_sum;
+		uint8_t tso;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		uint32_t total_length = 0;
 #endif
@@ -429,37 +431,29 @@
 			pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
 			rte_prefetch0(pkt_addr);
 		}
-		/* Should we enable HW CKSUM offload */
-		if (buf->ol_flags &
-		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
-		}
-		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
-		/*
-		 * Start by copying the Ethernet header minus the first two
-		 * bytes which will be appended at the end of the Ethernet
-		 * segment.
-		 */
-		memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2, 16);
-		length -= MLX5_WQE_DWORD_SIZE;
-		addr += MLX5_WQE_DWORD_SIZE;
-		/* Replace the Ethernet type by the VLAN if necessary. */
-		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-			uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
-
-			memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE - 2 -
-					   sizeof(vlan)),
-			       &vlan, sizeof(vlan));
-			addr -= sizeof(vlan);
-			length += sizeof(vlan);
-		}
-		/* Inline if enough room. */
-		if (txq->max_inline != 0) {
+		tso = buf->tso_segsz && buf->l4_len;
+		if (tso) {
+			/*
+			 * After copying the ETH seg we need to copy 16 bytes
+			 * less.
+			 */
+			header_sum = buf->l2_len + buf->l3_len + buf->l4_len
+				     - MLX5_WQE_DWORD_SIZE;
+			raw = ((uint8_t *)(uintptr_t)wqe) +
+			      2 * MLX5_WQE_DWORD_SIZE;
+			/*
+			 * Start by copying the Ethernet header minus the
+			 * first two bytes which will be appended at the
+			 * end of the Ethernet segment.
+			 */
+			memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
+			       MLX5_WQE_DWORD_SIZE);
+			length -= MLX5_WQE_DWORD_SIZE;
+			addr += MLX5_WQE_DWORD_SIZE;
+
 			uintptr_t end = (uintptr_t)
 				(((uintptr_t)txq->wqes) +
 				 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
-			uint16_t max_inline =
-				txq->max_inline * RTE_CACHE_LINE_SIZE;
 			uint16_t room;
 
 			/*
@@ -468,12 +462,9 @@
 			 */
 			raw += MLX5_WQE_DWORD_SIZE - 2;
 			room = end - (uintptr_t)raw;
-			if (room > max_inline) {
-				uintptr_t addr_end = (addr + max_inline) &
-					~(RTE_CACHE_LINE_SIZE - 1);
-				uint16_t copy_b = ((addr_end - addr) > length) ?
-						  length :
-						  (addr_end - addr);
+			if (room > header_sum) {
+				uintptr_t addr_end = addr + header_sum;
+				uint16_t copy_b = addr_end - addr;
 
 				rte_memcpy((void *)raw, (void *)addr, copy_b);
 				addr += copy_b;
@@ -488,10 +479,16 @@
 					0,
 					0,
 					};
+				wqe->eseg = (rte_v128u32_t){
+					0,
+					0,
+					0,
+					0};
 				length = 0;
 				buf = *(pkts--);
 				ds = 1;
-				goto next_pkt_part;
+				elts_head = (elts_head - 1) & (elts_n - 1);
+				goto next_pkt_end;
 			}
 			/*
 			 * 2 DWORDs consumed by the WQE header + ETH segment +
@@ -500,28 +497,138 @@
 			ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
 			if (length > 0) {
 				dseg = (volatile rte_v128u32_t *)
-					((uintptr_t)wqe +
-					 (ds * MLX5_WQE_DWORD_SIZE));
-				if ((uintptr_t)dseg >= end)
-					dseg = (volatile rte_v128u32_t *)
-					       txq->wqes;
+				       ((uintptr_t)wqe +
+				       (ds * MLX5_WQE_DWORD_SIZE));
+			if ((uintptr_t)dseg >= end)
+				dseg = (volatile rte_v128u32_t *)
+					txq->wqes;
 			} else if (!segs_n) {
 				goto next_pkt;
 			} else {
 				/* dseg will be advance as part of next_seg */
 				dseg = (volatile rte_v128u32_t *)
-					((uintptr_t)wqe +
-					 ((ds - 1) * MLX5_WQE_DWORD_SIZE));
+				       ((uintptr_t)wqe +
+					((ds - 1) * MLX5_WQE_DWORD_SIZE));
 				goto next_seg;
 			}
 		} else {
+			/* Should we enable HW CKSUM offload */
+			if (buf->ol_flags &
+			    (PKT_TX_IP_CKSUM |
+			     PKT_TX_TCP_CKSUM |
+			     PKT_TX_UDP_CKSUM)) {
+				cs_flags = MLX5_ETH_WQE_L3_CSUM |
+					   MLX5_ETH_WQE_L4_CSUM;
+			}
+			raw = ((uint8_t *)(uintptr_t)wqe) +
+			      2 * MLX5_WQE_DWORD_SIZE;
 			/*
-			 * No inline has been done in the packet, only the
-			 * Ethernet Header as been stored.
+			 * Start by copying the Ethernet header minus the
+			 * first two bytes which will be appended at the end
+			 * of the Ethernet segment.
 			 */
-			dseg = (volatile rte_v128u32_t *)
-				((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
-			ds = 3;
+			memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2, 16);
+			length -= MLX5_WQE_DWORD_SIZE;
+			addr += MLX5_WQE_DWORD_SIZE;
+			/* Replace the Ethernet type by the VLAN if necessary.
+			 */
+			if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+				uint32_t vlan = htonl(0x81000000 |
+						      buf->vlan_tci);
+
+				memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE -
+						   2 - sizeof(vlan)),
+				       &vlan, sizeof(vlan));
+				addr -= sizeof(vlan);
+				length += sizeof(vlan);
+			}
+			/* Inline if enough room. */
+			if (txq->max_inline != 0) {
+				uintptr_t end = (uintptr_t)
+					    (((uintptr_t)txq->wqes) +
+					    (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+				uint16_t max_inline =
+					 txq->max_inline * RTE_CACHE_LINE_SIZE;
+				uint16_t room;
+
+				/*
+				 * raw starts two bytes before the boundary to
+				 * continue the above copy of packet data.
+				 */
+				raw += MLX5_WQE_DWORD_SIZE - 2;
+				room = end - (uintptr_t)raw;
+				if (room > max_inline) {
+					uintptr_t addr_end =
+						(addr + max_inline) &
+						~(RTE_CACHE_LINE_SIZE - 1);
+					uint16_t copy_b = ((addr_end - addr)
+							  > length) ?
+							  length :
+							  (addr_end - addr);
+
+					rte_memcpy((void *)raw, (void *)addr,
+						   copy_b);
+					addr += copy_b;
+					length -= copy_b;
+					pkt_inline_sz += copy_b;
+					/* Sanity check. */
+					assert(addr <= addr_end);
+				} else {
+					wqe->ctrl = (rte_v128u32_t){
+						htonl(txq->wqe_ci << 8),
+						htonl(txq->qp_num_8s | 1),
+						0,
+						0,
+						};
+					wqe->eseg = (rte_v128u32_t){
+						0,
+						0,
+						0,
+						0};
+					length = 0;
+					buf = *(pkts--);
+					ds = 1;
+					elts_head = (elts_head - 1) &
+						    (elts_n - 1);
+					goto next_pkt_end;
+				}
+				/*
+				 * 2 DWORDs consumed by the WQE header
+				 * + ETH segment + 1 DSEG +
+				 * the size of the inline part of the packet.
+				 */
+				ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
+				if (length > 0) {
+					dseg = (volatile rte_v128u32_t *)
+						((uintptr_t)wqe +
+						 (ds * MLX5_WQE_DWORD_SIZE));
+					if ((uintptr_t)dseg >= end)
+						dseg =
+						  (volatile rte_v128u32_t *)
+						  txq->wqes;
+				} else if (!segs_n) {
+					goto next_pkt;
+				} else {
+					/*
+					 * dseg will be advance as part
+					 * of next_seg.
+					 */
+					dseg = (volatile rte_v128u32_t *)
+					       ((uintptr_t)wqe +
+						((ds - 1) *
+						 MLX5_WQE_DWORD_SIZE));
+					goto next_seg;
+				}
+			} else {
+				/*
+				 * No inline has been done in the packet,
+				 * only the Ethernet Header as been stored.
+				 */
+				dseg = (volatile rte_v128u32_t *)
+					((uintptr_t)wqe +
+					 (3 * MLX5_WQE_DWORD_SIZE));
+				ds = 3;
+			}
 		}
 		/* Add the remaining packet as a simple ds. */
 		addr = htonll(addr);
@@ -578,21 +685,38 @@
 		else
 			--pkts_n;
 next_pkt:
-		++i;
 		/* Initialize known and common part of the WQE structure. */
-		wqe->ctrl = (rte_v128u32_t){
-			htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-			htonl(txq->qp_num_8s | ds),
-			0,
-			0,
-		};
-next_pkt_part:
-		wqe->eseg = (rte_v128u32_t){
-			0,
-			cs_flags,
-			0,
-			(ehdr << 16) | htons(pkt_inline_sz),
-		};
+		if (tso) {
+			wqe->ctrl = (rte_v128u32_t){
+				htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
+				htonl(txq->qp_num_8s | ds),
+				0,
+				0,
+			};
+			wqe->eseg = (rte_v128u32_t){
+				0,
+				MLX5_ETH_WQE_L3_CSUM |
+					MLX5_ETH_WQE_L4_CSUM |
+					htons(buf->tso_segsz) << 16,
+				0,
+				(ehdr << 16) | htons(pkt_inline_sz),
+			};
+		} else {
+			wqe->ctrl = (rte_v128u32_t){
+				htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+				htonl(txq->qp_num_8s | ds),
+				0,
+				0,
+			};
+			wqe->eseg = (rte_v128u32_t){
+				0,
+				cs_flags,
+				0,
+				(ehdr << 16) | htons(pkt_inline_sz),
+			};
+		}
+next_pkt_end:
+		++i;
 		txq->wqe_ci += (ds + 3) / 4;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Increment sent bytes counter. */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [dpdk-dev] [PATCH 5/5] doc: add tso capabilities feature for mlx5
  2017-01-08 15:41 [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Elad Persiko
                   ` (2 preceding siblings ...)
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 4/5] net/mlx5: implement TSO data path Elad Persiko
@ 2017-01-08 15:42 ` Elad Persiko
  2017-01-23 12:27 ` [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Ferruh Yigit
  4 siblings, 0 replies; 8+ messages in thread
From: Elad Persiko @ 2017-01-08 15:42 UTC (permalink / raw)
  To: dev; +Cc: Elad Persiko

Feature implemented at:
b007e98ccda9 ("net/mlx5: implement TSO data path")
085c4137280a ("net/mlx5: support TSO in control plane")

Signed-off-by: Elad Persiko <eladpe@mellanox.com>
---
 doc/guides/nics/features/mlx4.ini | 1 +
 doc/guides/nics/features/mlx5.ini | 1 +
 2 files changed, 2 insertions(+)

diff --git a/doc/guides/nics/features/mlx4.ini b/doc/guides/nics/features/mlx4.ini
index c9828f7..d74b9dd 100644
--- a/doc/guides/nics/features/mlx4.ini
+++ b/doc/guides/nics/features/mlx4.ini
@@ -10,6 +10,7 @@ Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index f811e3f..f8a215e 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -11,6 +11,7 @@ Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-dev] [PATCH 2/5] net/mlx5: remove unnecessary goto label
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 2/5] net/mlx5: remove unnecessary goto label Elad Persiko
@ 2017-01-09 12:29   ` Ferruh Yigit
  0 siblings, 0 replies; 8+ messages in thread
From: Ferruh Yigit @ 2017-01-09 12:29 UTC (permalink / raw)
  To: Elad Persiko, dev

On 1/8/2017 3:42 PM, Elad Persiko wrote:
> use_dseg label can be deleted as it happens without goto.
> 
> Signed-off-by: Elad Persiko <eladpe@mellanox.com>

Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline
  2017-01-08 15:41 [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Elad Persiko
                   ` (3 preceding siblings ...)
  2017-01-08 15:42 ` [dpdk-dev] [PATCH 5/5] doc: add tso capabilities feature for mlx5 Elad Persiko
@ 2017-01-23 12:27 ` Ferruh Yigit
  2017-02-08 11:02   ` Ferruh Yigit
  4 siblings, 1 reply; 8+ messages in thread
From: Ferruh Yigit @ 2017-01-23 12:27 UTC (permalink / raw)
  To: Elad Persiko, dev, Adrien Mazarguil

On 1/8/2017 3:41 PM, Elad Persiko wrote:
> Prior to this patch, when sending a packet and the following
> conditions were reached:
> 	1. last working queue element is used.
> 	2. inline was requested by the user
> 	3. no room for inline packet.
> then the inline request was ignored and the packet was sent
> by pointer completely.
> 
> This patch handles this scenario. In this case the last
> work queue element is turned to be a null work queue element and
> the packet is being sent after the wrap around.
> 
> Signed-off-by: Elad Persiko <eladpe@mellanox.com>

It looks like this series missing driver maintainer CC'ed.

Cc: Adrien Mazarguil <adrien.mazarguil@6wind.com>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline
  2017-01-23 12:27 ` [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Ferruh Yigit
@ 2017-02-08 11:02   ` Ferruh Yigit
  0 siblings, 0 replies; 8+ messages in thread
From: Ferruh Yigit @ 2017-02-08 11:02 UTC (permalink / raw)
  To: Elad Persiko, dev, Adrien Mazarguil

On 1/23/2017 12:27 PM, Ferruh Yigit wrote:
> On 1/8/2017 3:41 PM, Elad Persiko wrote:
>> Prior to this patch, when sending a packet and the following
>> conditions were reached:
>> 	1. last working queue element is used.
>> 	2. inline was requested by the user
>> 	3. no room for inline packet.
>> then the inline request was ignored and the packet was sent
>> by pointer completely.
>>
>> This patch handles this scenario. In this case the last
>> work queue element is turned to be a null work queue element and
>> the packet is being sent after the wrap around.
>>
>> Signed-off-by: Elad Persiko <eladpe@mellanox.com>
> 
> It looks like this series missing driver maintainer CC'ed.
> 
> Cc: Adrien Mazarguil <adrien.mazarguil@6wind.com>

This series postponed to next release (17.05).

There was an expected update to merge with another patch [1], not
received in this release time frame.


[1]
http://dpdk.org/dev/patchwork/patch/19009/


Thanks,
ferruh

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2017-02-08 11:02 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-01-08 15:41 [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Elad Persiko
2017-01-08 15:42 ` [dpdk-dev] [PATCH 2/5] net/mlx5: remove unnecessary goto label Elad Persiko
2017-01-09 12:29   ` Ferruh Yigit
2017-01-08 15:42 ` [dpdk-dev] [PATCH 3/5] net/mlx5: support TSO in control plane Elad Persiko
2017-01-08 15:42 ` [dpdk-dev] [PATCH 4/5] net/mlx5: implement TSO data path Elad Persiko
2017-01-08 15:42 ` [dpdk-dev] [PATCH 5/5] doc: add tso capabilities feature for mlx5 Elad Persiko
2017-01-23 12:27 ` [dpdk-dev] [PATCH 1/5] net/mlx5: last WQE no room inline Ferruh Yigit
2017-02-08 11:02   ` Ferruh Yigit

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).