DPDK patches and discussions
* [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum and TSO
@ 2018-04-08 12:41 Xueming Li
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 1/3] net/mlx5: separate TSO function in Tx data path Xueming Li
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Xueming Li @ 2018-04-08 12:41 UTC
  To: Yongseok Koh, Shahaf Shuler; +Cc: Xueming Li, dev

This patchset introduces Tx generic tunnel checksum and TSO offloads to the mlx5 PMD.

This patchset relies on the new ethdev API introduced in:
	http://www.dpdk.org/dev/patchwork/patch/37519/
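
As a rough usage sketch (not part of this series), an application would
request the new offloads along these lines, assuming the port reports
them in tx_offload_capa; the capability flags come from the ethdev patch
above and the patches below, and the helper name is hypothetical:

	#include <rte_ethdev.h>

	static int
	enable_tunnel_tx_offloads(uint16_t port, uint16_t nb_rxq, uint16_t nb_txq)
	{
		struct rte_eth_dev_info info;
		struct rte_eth_conf conf = { 0 };
		/* Generic tunnel Tx offloads used by this series. */
		uint64_t want = DEV_TX_OFFLOAD_IP_TNL_TSO |
				DEV_TX_OFFLOAD_UDP_TNL_TSO |
				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

		rte_eth_dev_info_get(port, &info);
		/* Request only what the PMD reports as supported. */
		conf.txmode.offloads = info.tx_offload_capa & want;
		return rte_eth_dev_configure(port, nb_rxq, nb_txq, &conf);
	}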

Xueming Li (3):
  net/mlx5: separate TSO function in Tx data path
  net/mlx5: support generic tunnel offloading
  net/mlx5: allow max 192B TSO inline header length

 drivers/net/mlx5/Makefile             |   5 +
 drivers/net/mlx5/mlx5.c               |  14 ++-
 drivers/net/mlx5/mlx5.h               |   1 +
 drivers/net/mlx5/mlx5_defs.h          |   2 +-
 drivers/net/mlx5/mlx5_ethdev.c        |   5 +-
 drivers/net/mlx5/mlx5_prm.h           |  24 ++++
 drivers/net/mlx5/mlx5_rxtx.c          | 208 ++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_rxtx.h          | 100 ++++++++++++----
 drivers/net/mlx5/mlx5_rxtx_vec.c      |   9 +-
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h |   2 +-
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  |   2 +-
 drivers/net/mlx5/mlx5_txq.c           |  10 +-
 12 files changed, 289 insertions(+), 93 deletions(-)

-- 
2.13.3

* [dpdk-dev] [PATCH 1/3] net/mlx5: separate TSO function in Tx data path
  2018-04-08 12:41 [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum and TSO Xueming Li
@ 2018-04-08 12:41 ` Xueming Li
  2018-04-24  9:17   ` Yongseok Koh
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 2/3] net/mlx5: support generic tunnel offloading Xueming Li
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Xueming Li @ 2018-04-08 12:41 UTC
  To: Yongseok Koh, Shahaf Shuler; +Cc: Xueming Li, dev

Separate the TSO processing into its own function to make the logic of
mlx5_tx_burst clearer.

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
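A rough sketch of the resulting caller contract in mlx5_tx_burst (this
mirrors the diff below; illustrative only):

	ret = inline_tso(txq, buf, &length, &cs_flags, &addr,
			 &pkt_inline_sz, &raw, &max_wqe,
			 &tso_segsz, &tso_header_sz);
	if (ret == -EINVAL) {
		/* Bad TSO request; oerrors is already counted. */
		break;
	} else if (ret == -EAGAIN) {
		/* Inlined headers would cross the end of the WQ ring:
		 * post a NOP WQE and retry the packet from the top.
		 */
	}

Internally, inline_tso() deducts (MLX5_WQE_DS(copy_b) - 1 + 3) / 4 from
max_wqe, i.e. all but the first of the 16B data segments consumed by the
inlined headers, rounded up to whole 64B WQE building blocks.
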
 drivers/net/mlx5/mlx5_rxtx.c | 112 ++++++++++++++++++++++++++-----------------
 1 file changed, 67 insertions(+), 45 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 1f422c70b..a9de69131 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -259,6 +259,66 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
 }
 
 /**
+ * Inline TSO headers into WQE.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+static int
+inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+	   uint32_t *length,
+	   uint8_t *cs_flags,
+	   uintptr_t *addr,
+	   uint16_t *pkt_inline_sz,
+	   uint8_t **raw,
+	   uint16_t *max_wqe,
+	   uint16_t *tso_segsz,
+	   uint16_t *tso_header_sz)
+{
+	uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
+				    (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+	unsigned int copy_b;
+	uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+	const uint8_t tunneled = txq->tunnel_en &&
+				 (buf->ol_flags & (PKT_TX_TUNNEL_GRE |
+						   PKT_TX_TUNNEL_VXLAN));
+	uint16_t n_wqe;
+
+	*tso_segsz = buf->tso_segsz;
+	*tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
+	if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) {
+		txq->stats.oerrors++;
+		return -EINVAL;
+	}
+	if (tunneled) {
+		*tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
+		*cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+	} else {
+		*cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+	}
+	if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER)) {
+		txq->stats.oerrors++;
+		return -EINVAL;
+	}
+	copy_b = *tso_header_sz - *pkt_inline_sz;
+	/* First seg must contain all TSO headers. */
+	assert(copy_b <= *length);
+	if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
+		return -EAGAIN;
+	n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+	if (unlikely(*max_wqe < n_wqe))
+		return -EINVAL;
+	*max_wqe -= n_wqe;
+	rte_memcpy((void *)*raw, (void *)*addr, copy_b);
+	*length -= copy_b;
+	*addr += copy_b;
+	copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
+	*pkt_inline_sz += copy_b;
+	*raw += copy_b;
+	return 0;
+}
+
+/**
  * DPDK callback to check the status of a tx descriptor.
  *
  * @param tx_queue
@@ -392,6 +452,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		uint32_t total_length = 0;
 #endif
+		int ret;
 
 		/* first_seg */
 		buf = *pkts;
@@ -457,52 +518,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		raw += MLX5_WQE_DWORD_SIZE;
 		tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
 		if (tso) {
-			uintptr_t end =
-				(uintptr_t)(((uintptr_t)txq->wqes) +
-					    (1 << txq->wqe_n) * MLX5_WQE_SIZE);
-			unsigned int copy_b;
-			uint8_t vlan_sz =
-				(buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
-			const uint64_t is_tunneled =
-				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-						 PKT_TX_TUNNEL_VXLAN);
-
-			tso_header_sz = buf->l2_len + vlan_sz +
-					buf->l3_len + buf->l4_len;
-			tso_segsz = buf->tso_segsz;
-			if (unlikely(tso_segsz == 0)) {
-				txq->stats.oerrors++;
+			ret = inline_tso(txq, buf, &length, &cs_flags,
+					 &addr, &pkt_inline_sz,
+					 &raw, &max_wqe,
+					 &tso_segsz, &tso_header_sz);
+			if (ret == -EINVAL) {
 				break;
-			}
-			if (is_tunneled	&& txq->tunnel_en) {
-				tso_header_sz += buf->outer_l2_len +
-						 buf->outer_l3_len;
-				cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
-			} else {
-				cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-			}
-			if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
-				txq->stats.oerrors++;
-				break;
-			}
-			copy_b = tso_header_sz - pkt_inline_sz;
-			/* First seg must contain all headers. */
-			assert(copy_b <= length);
-			if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
-				uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
-
-				if (unlikely(max_wqe < n))
-					break;
-				max_wqe -= n;
-				rte_memcpy((void *)raw, (void *)addr, copy_b);
-				addr += copy_b;
-				length -= copy_b;
-				/* Include padding for TSO header. */
-				copy_b = MLX5_WQE_DS(copy_b) *
-					 MLX5_WQE_DWORD_SIZE;
-				pkt_inline_sz += copy_b;
-				raw += copy_b;
-			} else {
+			} else if (ret == -EAGAIN) {
 				/* NOP WQE. */
 				wqe->ctrl = (rte_v128u32_t){
 					rte_cpu_to_be_32(txq->wqe_ci << 8),
-- 
2.13.3

* [dpdk-dev] [PATCH 2/3] net/mlx5: support generic tunnel offloading
  2018-04-08 12:41 [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum and TSO Xueming Li
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 1/3] net/mlx5: separate TSO function in Tx data path Xueming Li
@ 2018-04-08 12:41 ` Xueming Li
  2018-04-24 10:27   ` Yongseok Koh
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 3/3] net/mlx5: allow max 192B TSO inline header length Xueming Li
  2018-04-24 16:51 ` [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum and TSO Shahaf Shuler
  3 siblings, 1 reply; 8+ messages in thread
From: Xueming Li @ 2018-04-08 12:41 UTC
  To: Yongseok Koh, Shahaf Shuler; +Cc: Xueming Li, dev

This commit adds support for generic tunnel TSO and checksum offloads.
The PMD computes the inner/outer header offsets from the mbuf fields and
the hardware performs the checksum and TSO calculations based on those
offsets and types.

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
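For illustration, a hypothetical mbuf setup that exercises the new SW
parser path for a VXLAN-like UDP tunnel over IPv4 (struct names per this
DPDK version; the 8B constant stands for the tunnel header):

	m->ol_flags = PKT_TX_TUNNEL_UDP | PKT_TX_TCP_SEG |
		      PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		      PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
	m->outer_l2_len = sizeof(struct ether_hdr);
	m->outer_l3_len = sizeof(struct ipv4_hdr);
	/* l2_len spans outer L4 + tunnel header + inner Ethernet. */
	m->l2_len = sizeof(struct udp_hdr) + 8 + sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->l4_len = sizeof(struct tcp_hdr);
	m->tso_segsz = 1400;

txq_mbuf_to_swp() below accumulates these lengths into the four SWP
offsets (outer L3/L4, inner L3/L4) and stores each one in 2-byte units
in the Ethernet segment of the WQE.
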
 drivers/net/mlx5/Makefile             |   5 ++
 drivers/net/mlx5/mlx5.c               |  14 +++-
 drivers/net/mlx5/mlx5.h               |   1 +
 drivers/net/mlx5/mlx5_ethdev.c        |   5 +-
 drivers/net/mlx5/mlx5_prm.h           |  24 +++++++
 drivers/net/mlx5/mlx5_rxtx.c          | 122 ++++++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_rxtx.h          | 100 ++++++++++++++++++++++------
 drivers/net/mlx5/mlx5_rxtx_vec.c      |   9 +--
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h |   2 +-
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  |   2 +-
 drivers/net/mlx5/mlx5_txq.c           |  10 ++-
 11 files changed, 234 insertions(+), 60 deletions(-)
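
The new lookup-table indexes are taken straight from the mbuf flag bit
positions, compacted by a right shift (by 50 for the checksum table, by
52 for the SWP types table). A hypothetical sanity check of that layout,
using the flag values from rte_mbuf.h (RTE_BUILD_BUG_ON must sit inside
a function, e.g. an init path):

	RTE_BUILD_BUG_ON((PKT_TX_TCP_SEG >> 50) != (1 << 0));
	RTE_BUILD_BUG_ON((PKT_TX_L4_MASK >> 50) != (3 << 2));
	RTE_BUILD_BUG_ON((PKT_TX_IP_CKSUM >> 50) != (1 << 4));
	RTE_BUILD_BUG_ON((PKT_TX_OUTER_IP_CKSUM >> 50) != (1 << 8));
	RTE_BUILD_BUG_ON((PKT_TX_IPV6 >> 52) != (1 << 4));
	RTE_BUILD_BUG_ON((PKT_TX_OUTER_IPV6 >> 52) != (1 << 8));

Bit 9 of each index (tunnel for the checksum table, outer UDP for the
SWP table) is not a flag bit; the lookup helpers OR it in explicitly.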

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 201f6f06a..cc128ef69 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -135,6 +135,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
 		enum IBV_WQ_FLAG_RX_END_PADDING \
 		$(AUTOCONF_OUTPUT)
 	$Q sh -- '$<' '$@' \
+		HAVE_IBV_MLX5_MOD_SWP \
+		infiniband/mlx5dv.h \
+		enum MLX5DV_CONTEXT_MASK_SWP \
+		$(AUTOCONF_OUTPUT)
+	$Q sh -- '$<' '$@' \
 		HAVE_IBV_MLX5_MOD_MPW \
 		infiniband/mlx5dv.h \
 		enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 7d58d66bb..d886ddd4f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -600,6 +600,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	unsigned int mps;
 	unsigned int cqe_comp;
 	unsigned int tunnel_en = 0;
+	unsigned int swp = 0;
 	int idx;
 	int i;
 	struct mlx5dv_context attrs_out = {0};
@@ -667,6 +668,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	}
 	ibv_dev = list[i];
 	DRV_LOG(DEBUG, "device opened");
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+	attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
+#endif
 	/*
 	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
 	 * as all ConnectX-5 devices.
@@ -687,6 +691,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		DRV_LOG(DEBUG, "MPW isn't supported");
 		mps = MLX5_MPW_DISABLED;
 	}
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+	if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+		swp = attrs_out.sw_parsing_caps.sw_parsing_offloads;
+	DRV_LOG(DEBUG, "SWP support: %u", swp);
+#endif
 	if (RTE_CACHE_LINE_SIZE == 128 &&
 	    !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
 		cqe_comp = 0;
@@ -733,6 +742,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			.txq_inline = MLX5_ARG_UNSET,
 			.txqs_inline = MLX5_ARG_UNSET,
 			.inline_max_packet_sz = MLX5_ARG_UNSET,
+			.swp = !!swp,
 		};
 
 		len = snprintf(name, sizeof(name), PCI_PRI_FMT,
@@ -1182,8 +1192,10 @@ RTE_INIT(rte_mlx5_pmd_init);
 static void
 rte_mlx5_pmd_init(void)
 {
-	/* Build the static table for ptype conversion. */
+	/* Build the static tables for Verbs conversion. */
 	mlx5_set_ptype_table();
+	mlx5_set_cksum_table();
+	mlx5_set_swp_types_table();
 	/*
 	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
 	 * huge pages. Calling ibv_fork_init() during init allows
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index faacfd9d6..b5e5e0b6c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -87,6 +87,7 @@ struct mlx5_dev_config {
 	unsigned int tx_vec_en:1; /* Tx vector is enabled. */
 	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
 	unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+	unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
 	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
 	unsigned int ind_table_max_size; /* Maximum indirection table size. */
 	int txq_inline; /* Maximum packet size for inlining. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index b6f5101cf..aecfdc1d4 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1063,11 +1063,14 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
 	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
 				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				    DEV_TX_OFFLOAD_GRE_TNL_TSO));
+	int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
+				    DEV_TX_OFFLOAD_UDP_TNL_TSO |
+				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
 	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
 
 	assert(priv != NULL);
 	/* Select appropriate TX function. */
-	if (vlan_insert || tso)
+	if (vlan_insert || tso || swp)
 		return tx_pkt_burst;
 	if (config->mps == MLX5_MPW_ENHANCED) {
 		if (mlx5_check_vec_tx_support(dev) > 0) {
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 9eb9c15e1..2129d74a3 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -107,6 +107,30 @@
 /* Inner L4 checksum offload (Tunneled packets only). */
 #define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
 
+/* Outer L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_OUTER_TCP  (0u << 5)
+
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_OUTER_UDP  (1u << 5)
+
+/* Outer L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
+
+/* Inner L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
+
+/* Inner L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
+
 /* Is flow mark valid. */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index a9de69131..d1dc7d327 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -47,6 +47,9 @@ uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
 };
 
+uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
+uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+
 /**
  * Build a table to translate Rx completion flags to packet type.
  *
@@ -203,6 +206,74 @@ mlx5_set_ptype_table(void)
 }
 
 /**
+ * Build a table to translate packet to checksum type of Verbs.
+ */
+void
+mlx5_set_cksum_table(void)
+{
+	unsigned int i;
+	uint8_t v;
+
+	/*
+	 * The index should have:
+	 * bit[0] = PKT_TX_TCP_SEG
+	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+	 * bit[4] = PKT_TX_IP_CKSUM
+	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
+	 * bit[9] = tunnel
+	 */
+	for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
+		v = 0;
+		if (i & (1 << 9)) {
+			/* Tunneled packet. */
+			if (i & (1 << 8)) /* Outer IP. */
+				v |= MLX5_ETH_WQE_L3_CSUM;
+			if (i & (1 << 4)) /* Inner IP. */
+				v |= MLX5_ETH_WQE_L3_INNER_CSUM;
+			if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+				v |= MLX5_ETH_WQE_L4_INNER_CSUM;
+		} else {
+			/* No tunnel. */
+			if (i & (1 << 4)) /* IP. */
+				v |= MLX5_ETH_WQE_L3_CSUM;
+			if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+				v |= MLX5_ETH_WQE_L4_CSUM;
+		}
+		mlx5_cksum_table[i] = v;
+	}
+}
+
+/**
+ * Build a table to translate packet type of mbuf to SWP type of Verbs.
+ */
+void
+mlx5_set_swp_types_table(void)
+{
+	unsigned int i;
+	uint8_t v;
+
+	/*
+	 * The index should have:
+	 * bit[0:1] = PKT_TX_L4_MASK
+	 * bit[4] = PKT_TX_IPV6
+	 * bit[8] = PKT_TX_OUTER_IPV6
+	 * bit[9] = PKT_TX_OUTER_UDP
+	 */
+	for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
+		v = 0;
+		if (i & (1 << 8))
+			v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
+		if (i & (1 << 9))
+			v |= MLX5_ETH_WQE_L4_OUTER_UDP;
+		if (i & (1 << 4))
+			v |= MLX5_ETH_WQE_L3_INNER_IPV6;
+		if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
+			v |= MLX5_ETH_WQE_L4_INNER_UDP;
+		mlx5_swp_types_table[i] = v;
+	}
+}
+
+/**
  * Return the size of tailroom of WQ.
  *
  * @param txq
@@ -267,7 +338,6 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
 static int
 inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
 	   uint32_t *length,
-	   uint8_t *cs_flags,
 	   uintptr_t *addr,
 	   uint16_t *pkt_inline_sz,
 	   uint8_t **raw,
@@ -279,9 +349,8 @@ inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
 				    (1 << txq->wqe_n) * MLX5_WQE_SIZE);
 	unsigned int copy_b;
 	uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
-	const uint8_t tunneled = txq->tunnel_en &&
-				 (buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-						   PKT_TX_TUNNEL_VXLAN));
+	const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags &
+				 PKT_TX_TUNNEL_MASK);
 	uint16_t n_wqe;
 
 	*tso_segsz = buf->tso_segsz;
@@ -290,19 +359,15 @@ inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
 		txq->stats.oerrors++;
 		return -EINVAL;
 	}
-	if (tunneled) {
+	if (tunneled)
 		*tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
-		*cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
-	} else {
-		*cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-	}
-	if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER)) {
+	/* First seg must contain all TSO headers. */
+	if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER ||
+		     *tso_header_sz > DATA_LEN(buf))) {
 		txq->stats.oerrors++;
 		return -EINVAL;
 	}
 	copy_b = *tso_header_sz - *pkt_inline_sz;
-	/* First seg must contain all TSO headers. */
-	assert(copy_b <= *length);
 	if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
 		return -EAGAIN;
 	n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
@@ -435,7 +500,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	if (unlikely(!max_wqe))
 		return 0;
 	do {
-		struct rte_mbuf *buf = NULL;
+		struct rte_mbuf *buf = *pkts; /* First_seg. */
 		uint8_t *raw;
 		volatile struct mlx5_wqe_v *wqe = NULL;
 		volatile rte_v128u32_t *dseg = NULL;
@@ -447,15 +512,16 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		uint16_t tso_header_sz = 0;
 		uint16_t ehdr;
 		uint8_t cs_flags;
-		uint64_t tso = 0;
+		uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
+		uint8_t is_vlan = !!(buf->ol_flags & PKT_TX_VLAN_PKT);
+		uint32_t swp_offsets = 0;
+		uint8_t swp_types = 0;
 		uint16_t tso_segsz = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		uint32_t total_length = 0;
 #endif
 		int ret;
 
-		/* first_seg */
-		buf = *pkts;
 		segs_n = buf->nb_segs;
 		/*
 		 * Make sure there is enough room to store this packet and
@@ -490,10 +556,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		if (pkts_n - i > 1)
 			rte_prefetch0(
 			    rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
-		cs_flags = txq_ol_cksum_to_cs(txq, buf);
+		cs_flags = txq_ol_cksum_to_cs(buf);
+		txq_mbuf_to_swp(txq, buf, tso, is_vlan,
+				(uint8_t *)&swp_offsets, &swp_types);
 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
 		/* Replace the Ethernet type by the VLAN if necessary. */
-		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+		if (is_vlan) {
 			uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
 							 buf->vlan_tci);
 			unsigned int len = 2 * ETHER_ADDR_LEN - 2;
@@ -516,9 +584,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			addr += pkt_inline_sz;
 		}
 		raw += MLX5_WQE_DWORD_SIZE;
-		tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
 		if (tso) {
-			ret = inline_tso(txq, buf, &length, &cs_flags,
+			ret = inline_tso(txq, buf, &length,
 					 &addr, &pkt_inline_sz,
 					 &raw, &max_wqe,
 					 &tso_segsz, &tso_header_sz);
@@ -695,8 +762,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				0,
 			};
 			wqe->eseg = (rte_v128u32_t){
-				0,
-				cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
+				swp_offsets,
+				cs_flags | (swp_types << 8) |
+				(rte_cpu_to_be_16(tso_segsz) << 16),
 				0,
 				(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
 			};
@@ -709,8 +777,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				0,
 			};
 			wqe->eseg = (rte_v128u32_t){
-				0,
-				cs_flags,
+				swp_offsets,
+				cs_flags | (swp_types << 8),
 				0,
 				(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
 			};
@@ -882,7 +950,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		max_elts -= segs_n;
 		--pkts_n;
-		cs_flags = txq_ol_cksum_to_cs(txq, buf);
+		cs_flags = txq_ol_cksum_to_cs(buf);
 		/* Retrieve packet information. */
 		length = PKT_LEN(buf);
 		assert(length);
@@ -1114,7 +1182,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		 * iteration.
 		 */
 		max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-		cs_flags = txq_ol_cksum_to_cs(txq, buf);
+		cs_flags = txq_ol_cksum_to_cs(buf);
 		/* Retrieve packet information. */
 		length = PKT_LEN(buf);
 		/* Start new session if packet differs. */
@@ -1391,7 +1459,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 		/* Make sure there is enough room to store this packet. */
 		if (max_elts - j == 0)
 			break;
-		cs_flags = txq_ol_cksum_to_cs(txq, buf);
+		cs_flags = txq_ol_cksum_to_cs(buf);
 		/* Retrieve packet information. */
 		length = PKT_LEN(buf);
 		/* Start new session if:
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index f5af43735..2bcf316ed 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -168,6 +168,7 @@ struct mlx5_txq_data {
 	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
 	uint16_t tunnel_en:1;
 	/* When set TX offload for tunneled packets are supported. */
+	uint16_t swp_en:1; /* Whether SW parser is enabled. */
 	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
 	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
 	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
@@ -280,8 +281,12 @@ uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
 /* mlx5_rxtx.c */
 
 extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];
 
 void mlx5_set_ptype_table(void);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
 uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
 		       uint16_t pkts_n);
 uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
@@ -614,38 +619,89 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 }
 
 /**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf to Verbs SWP.
  *
  * @param txq_data
  *   Pointer to the Tx queue.
  * @param buf
  *   Pointer to the mbuf.
+ * @param tso
+ *   TSO offloads enabled.
+ * @param vlan
+ *   VLAN offloads enabled.
+ * @param offsets
+ *   Pointer to the SWP header offsets.
+ * @param swp_types
+ *   Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+		 uint8_t tso, uint64_t vlan,
+		 uint8_t *offsets, uint8_t *swp_types)
+{
+	uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+	uint16_t idx;
+	uint16_t off;
+	const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+				       PKT_TX_OUTER_IPV6;
+
+	if (likely(!tunnel || !txq->swp_en ||
+		   (tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP)))
+		return;
+	/*
+	 * The index should have:
+	 * bit[0:1] = PKT_TX_L4_MASK
+	 * bit[4] = PKT_TX_IPV6
+	 * bit[8] = PKT_TX_OUTER_IPV6
+	 * bit[9] = PKT_TX_OUTER_UDP
+	 */
+	idx = (buf->ol_flags & ol_flags_mask) >> 52;
+	if (tunnel == PKT_TX_TUNNEL_UDP)
+		idx |= 1 << 9;
+	*swp_types = mlx5_swp_types_table[idx];
+	/* swp offsets. */
+	off = buf->outer_l2_len + (vlan ? 4 : 0); /* Outer L3 offset. */
+	if (tso || (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM))
+		offsets[1] = off >> 1;
+	off += buf->outer_l3_len; /* Outer L4 offset. */
+	if (tunnel == PKT_TX_TUNNEL_UDP)
+		offsets[0] = off >> 1;
+	off += buf->l2_len; /* Inner L3 offset. */
+	if (tso || (buf->ol_flags & PKT_TX_IP_CKSUM))
+		offsets[3] = off >> 1;
+	off += buf->l3_len; /* Inner L4 offset. */
+	if (tso || ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) ||
+	    ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM))
+		offsets[2] = off >> 1;
+}
+
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ *   Pointer to the mbuf.
  *
  * @return
- *   the converted cs_flags.
+ *   Converted checksum flags.
  */
 static __rte_always_inline uint8_t
-txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+txq_ol_cksum_to_cs(struct rte_mbuf *buf)
 {
-	uint8_t cs_flags = 0;
-
-	/* Should we enable HW CKSUM offload */
-	if (buf->ol_flags &
-	    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
-	     PKT_TX_OUTER_IP_CKSUM)) {
-		if (txq_data->tunnel_en &&
-		    (buf->ol_flags &
-		     (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
-			cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-				   MLX5_ETH_WQE_L4_INNER_CSUM;
-			if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-				cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-		} else {
-			cs_flags = MLX5_ETH_WQE_L3_CSUM |
-				   MLX5_ETH_WQE_L4_CSUM;
-		}
-	}
-	return cs_flags;
+	uint32_t idx;
+	uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
+	const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
+				       PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+
+	/*
+	 * The index should have:
+	 * bit[0] = PKT_TX_TCP_SEG
+	 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+	 * bit[4] = PKT_TX_IP_CKSUM
+	 * bit[8] = PKT_TX_OUTER_IP_CKSUM
+	 * bit[9] = tunnel
+	 */
+	idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
+	return mlx5_cksum_table[idx];
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 257d7b11c..24a80cf07 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -42,8 +42,6 @@
 /**
  * Count the number of packets having same ol_flags and calculate cs_flags.
  *
- * @param txq
- *   Pointer to TX queue structure.
  * @param pkts
  *   Pointer to array of packets.
  * @param pkts_n
@@ -55,8 +53,7 @@
  *   Number of packets having same ol_flags.
  */
 static inline unsigned int
-txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
-		 uint16_t pkts_n, uint8_t *cs_flags)
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
 {
 	unsigned int pos;
 	const uint64_t ol_mask =
@@ -70,7 +67,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 	for (pos = 1; pos < pkts_n; ++pos)
 		if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
 			break;
-	*cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
+	*cs_flags = txq_ol_cksum_to_cs(pkts[0]);
 	return pos;
 }
 
@@ -141,7 +138,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
 			n = txq_count_contig_single_seg(&pkts[nb_tx], n);
 		if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
-			n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+			n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
 		ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
 		nb_tx += ret;
 		if (!ret)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index bbe1818ef..37ad768e5 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -142,7 +142,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 			break;
 		wqe = &((volatile struct mlx5_wqe64 *)
 			 txq->wqes)[wqe_ci & wq_mask].hdr;
-		cs_flags = txq_ol_cksum_to_cs(txq, buf);
+		cs_flags = txq_ol_cksum_to_cs(buf);
 		/* Title WQEBB pointer. */
 		t_wqe = (uint8x16_t *)wqe;
 		dseg = (uint8_t *)(wqe + 1);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index c088bcb51..d531d2b10 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -144,7 +144,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 		}
 		wqe = &((volatile struct mlx5_wqe64 *)
 			 txq->wqes)[wqe_ci & wq_mask].hdr;
-		cs_flags = txq_ol_cksum_to_cs(txq, buf);
+		cs_flags = txq_ol_cksum_to_cs(buf);
 		/* Title WQEBB pointer. */
 		t_wqe = (__m128i *)wqe;
 		dseg = (__m128i *)(wqe + 1);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9139429be..3f3912b45 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -119,6 +119,9 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 		if (config->tso)
 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+		if (config->swp)
+			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
 	}
 	return offloads;
 }
@@ -686,7 +689,9 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
 	int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
 					       DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					       DEV_TX_OFFLOAD_GRE_TNL_TSO));
+					       DEV_TX_OFFLOAD_GRE_TNL_TSO |
+					       DEV_TX_OFFLOAD_IP_TNL_TSO |
+					       DEV_TX_OFFLOAD_UDP_TNL_TSO));
 
 	txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
 		0 : config->txq_inline;
@@ -767,6 +772,9 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		txq_ctrl->txq.tso_en = 1;
 	}
 	txq_ctrl->txq.tunnel_en = config->tunnel_en;
+	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+				 DEV_TX_OFFLOAD_UDP_TNL_TSO) &
+				txq_ctrl->txq.offloads) && config->swp;
 }
 
 /**
-- 
2.13.3

* [dpdk-dev] [PATCH 3/3] net/mlx5: allow max 192B TSO inline header length
  2018-04-08 12:41 [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum and TSO Xueming Li
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 1/3] net/mlx5: separate TSO function in Tx data path Xueming Li
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 2/3] net/mlx5: support generic tunnel offloading Xueming Li
@ 2018-04-08 12:41 ` Xueming Li
  2018-04-24  9:18   ` Yongseok Koh
  2018-04-24 16:51 ` [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum and TSO Shahaf Shuler
  3 siblings, 1 reply; 8+ messages in thread
From: Xueming Li @ 2018-04-08 12:41 UTC
  To: Yongseok Koh, Shahaf Shuler; +Cc: Xueming Li, dev

Change the max inline header length to 192B to allow IPv6 VXLAN TSO
headers and headers with options that exceed 128B.

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
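For reference, a breakdown of why 128B falls short for IPv6 VXLAN TSO,
assuming standard header sizes and no options:

	outer Ethernet  14B
	outer IPv6      40B
	outer UDP        8B
	VXLAN            8B
	inner Ethernet  14B
	inner IPv6      40B
	TCP             20B
	total          144B

144B already exceeds the previous cap, and IPv6 extension headers or TCP
options grow it further; 192B (a multiple of the 64B WQEBB size) leaves
headroom for them.
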
 drivers/net/mlx5/mlx5_defs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 6401588ee..851166ed9 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -58,7 +58,7 @@
 #define MLX5_MAX_XSTATS 32
 
 /* Maximum Packet headers size (L2+L3+L4) for TSO. */
-#define MLX5_MAX_TSO_HEADER 128
+#define MLX5_MAX_TSO_HEADER 192
 
 /* Default minimum number of Tx queues for vectorized Tx. */
 #define MLX5_VPMD_MIN_TXQS 4
-- 
2.13.3

* Re: [dpdk-dev] [PATCH 1/3] net/mlx5: separate TSO function in Tx data path
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 1/3] net/mlx5: separate TSO function in Tx data path Xueming Li
@ 2018-04-24  9:17   ` Yongseok Koh
  0 siblings, 0 replies; 8+ messages in thread
From: Yongseok Koh @ 2018-04-24  9:17 UTC
  To: Xueming Li; +Cc: Shahaf Shuler, dev

On Sun, Apr 08, 2018 at 08:41:19PM +0800, Xueming Li wrote:
> Separate the TSO processing into its own function to make the logic of
> mlx5_tx_burst clearer.
> 
> Signed-off-by: Xueming Li <xuemingl@mellanox.com>
> ---
Acked-by: Yongseok Koh <yskoh@mellanox.com>

Thanks

* Re: [dpdk-dev] [PATCH 3/3] net/mlx5: allow max 192B TSO inline header length
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 3/3] net/mlx5: allow max 192B TSO inline header length Xueming Li
@ 2018-04-24  9:18   ` Yongseok Koh
  0 siblings, 0 replies; 8+ messages in thread
From: Yongseok Koh @ 2018-04-24  9:18 UTC
  To: Xueming Li; +Cc: Shahaf Shuler, dev

On Sun, Apr 08, 2018 at 08:41:21PM +0800, Xueming Li wrote:
> Change the max inline header length to 192B to allow IPv6 VXLAN TSO
> headers and headers with options that exceed 128B.
> 
> Signed-off-by: Xueming Li <xuemingl@mellanox.com>
> ---
Acked-by: Yongseok Koh <yskoh@mellanox.com>

Thanks

* Re: [dpdk-dev] [PATCH 2/3] net/mlx5: support generic tunnel offloading
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 2/3] net/mlx5: support generic tunnel offloading Xueming Li
@ 2018-04-24 10:27   ` Yongseok Koh
  0 siblings, 0 replies; 8+ messages in thread
From: Yongseok Koh @ 2018-04-24 10:27 UTC
  To: Xueming Li; +Cc: Shahaf Shuler, dev

On Sun, Apr 08, 2018 at 08:41:20PM +0800, Xueming Li wrote:
> This commit adds support for generic tunnel TSO and checksum offloads.
> The PMD computes the inner/outer header offsets from the mbuf fields and
> the hardware performs the checksum and TSO calculations based on those
> offsets and types.
> 
> Signed-off-by: Xueming Li <xuemingl@mellanox.com>
> ---
Acked-by: Yongseok Koh <yskoh@mellanox.com>

Thanks

* Re: [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum and TSO
  2018-04-08 12:41 [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum and TSO Xueming Li
                   ` (2 preceding siblings ...)
  2018-04-08 12:41 ` [dpdk-dev] [PATCH 3/3] net/mlx5: allow max 192B TSO inline header length Xueming Li
@ 2018-04-24 16:51 ` Shahaf Shuler
  3 siblings, 0 replies; 8+ messages in thread
From: Shahaf Shuler @ 2018-04-24 16:51 UTC
  To: Xueming(Steven) Li, Yongseok Koh
  Cc: Xueming(Steven) Li, dev, Raslan Darawsheh, Wael Abualrub

Sunday, April 8, 2018 3:41 PM, Xueming Li:
> Subject: [dpdk-dev] [PATCH 0/3] mlx5 support Tx generic tunnel checksum
> and TSO
> 
> This patchset introduces Tx generic tunnel checksum and TSO offloads to
> the mlx5 PMD.
> 
> This patchset relies on the new ethdev API introduced in:
> 	http://www.dpdk.org/dev/patchwork/patch/37519/
> 
> Xueming Li (3):
>   net/mlx5: separate TSO function in Tx data path
>   net/mlx5: support generic tunnel offloading
>   net/mlx5: allow max 192B TSO inline header length
> 
>  drivers/net/mlx5/Makefile             |   5 +
>  drivers/net/mlx5/mlx5.c               |  14 ++-
>  drivers/net/mlx5/mlx5.h               |   1 +
>  drivers/net/mlx5/mlx5_defs.h          |   2 +-
>  drivers/net/mlx5/mlx5_ethdev.c        |   5 +-
>  drivers/net/mlx5/mlx5_prm.h           |  24 ++++
>  drivers/net/mlx5/mlx5_rxtx.c          | 208 ++++++++++++++++++++++++----------
>  drivers/net/mlx5/mlx5_rxtx.h          | 100 ++++++++++++----
>  drivers/net/mlx5/mlx5_rxtx_vec.c      |   9 +-
>  drivers/net/mlx5/mlx5_rxtx_vec_neon.h |   2 +-
>  drivers/net/mlx5/mlx5_rxtx_vec_sse.h  |   2 +-
>  drivers/net/mlx5/mlx5_txq.c           |  10 +-
>  12 files changed, 289 insertions(+), 93 deletions(-)

Applied to next-net-mlx, thanks.

> 
> --
> 2.13.3
