From: Xueming Li <xuemingl@mellanox.com>
To: Olivier MATZ <olivier.matz@6wind.com>,
Thomas Monjalon <thomas@monjalon.net>,
Jingjing Wu <jingjing.wu@intel.com>,
Yongseok Koh <yskoh@mellanox.com>
Cc: Xueming Li <xuemingl@mellanox.com>,
Shahaf Shuler <shahafs@mellanox.com>,
dev@dpdk.org
Subject: [dpdk-dev] [PATCH 1/6] net/mlx5: support tx swp tunnel offloading
Date: Tue, 9 Jan 2018 22:11:05 +0800 [thread overview]
Message-ID: <20180109141110.146250-2-xuemingl@mellanox.com> (raw)
In-Reply-To: <20180109141110.146250-1-xuemingl@mellanox.com>
This commit adds support for generic tunnel TSO and checksum offloads.
The PMD will compute the inner/outer headers offset according to the
mbuf fields. The Hardware will do calculation according to offsets and types.
Such capability is supported only for PFs.
Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
drivers/net/mlx5/mlx5_prm.h | 12 ++++
drivers/net/mlx5/mlx5_rxtx.c | 163 ++++++++++++++++++++++++++++---------------
drivers/net/mlx5/mlx5_rxtx.h | 94 ++++++++++++++++++++-----
drivers/net/mlx5/mlx5_txq.c | 1 +
4 files changed, 195 insertions(+), 75 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 2de310bcb..edf39c249 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -135,6 +135,18 @@
/* Inner L4 checksum offload (Tunneled packets only). */
#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_OUTER_L4_UDP (1u << 5)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_OUTER_L3_IPV6 (1u << 4)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_INNER_L4_UDP (1u << 1)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_INNER_L3_IPV6 (1u << 0)
+
/* Is flow mark valid. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 3b8f71c28..d79f9fc0e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -247,6 +247,80 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
}
/**
+ * Inline TSO headers into WQE and set checksums fields.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param buf
+ * Pointer to packet mbuf structure.
+ * @param raw
+ * Double pointer to WQE current write offset.
+ * @param cs_flags
+ * Pointer to checksums flags.
+ * @param swp_offsets
+ *   Pointer to header offsets when using software parser.
+ * @param swp_types
+ *   Pointer to header types when using software parser.
+ * @param max_wqe
+ * Pointer to the available number of wqes.
+ *
+ * @return
+ * Headers size which were copied into wqe upon success,
+ * negative errno value otherwise, the following errors
+ * are defined:
+ *
+ * -EINVAL: invalid arguments for TSO. packet headers are too large
+ * or not enough WQEs. cannot execute the TSO.
+ *
+ * -ENOMEM: reached the end of WQ ring. the TSO WQE can be executed
+ * only after the WQ ring wraparound.
+ */
+static int
+process_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf, uint8_t **raw,
+ uint16_t *max_wqe)
+{
+ uintptr_t addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)
+ tx_mlx5_wqe(txq, txq->wqe_ci);
+ uint8_t *curr = *raw;
+ const uint8_t tunneled = txq->tunnel_en &&
+ (buf->ol_flags & PKT_TX_TUNNEL_MASK);
+ uint16_t pkt_inline_sz = (uintptr_t)curr - (uintptr_t)wqe -
+ (MLX5_WQE_DWORD_SIZE * 2 - 2);
+ uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+ uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+ unsigned int copy_b;
+ uint16_t tso_header_sz;
+
+ if (vlan_sz)
+ addr += 2 * ETHER_ADDR_LEN + 2;
+ else
+ addr += pkt_inline_sz;
+ tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
+ if (tunneled)
+ tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
+ if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
+ txq->stats.oerrors++;
+ return -EINVAL;
+ }
+ copy_b = tso_header_sz - pkt_inline_sz;
+ if (copy_b && ((end - (uintptr_t)curr) > copy_b)) {
+ uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+
+ if (unlikely(*max_wqe < n))
+ return -EINVAL;
+ *max_wqe -= n;
+ rte_memcpy((void *)curr, (void *)addr, copy_b);
+ /* Another DWORD will be added in the inline part. */
+ *raw = curr + MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
+ } else {
+ return -ENOMEM;
+ }
+ return copy_b;
+}
+
+/**
* DPDK callback to check the status of a tx descriptor.
*
* @param tx_queue
@@ -376,6 +450,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t ehdr;
uint8_t cs_flags;
uint64_t tso = 0;
+ uint32_t swp_offsets = 0;
+ uint8_t swp_types = 0;
uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
@@ -417,7 +493,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (pkts_n - i > 1)
rte_prefetch0(
rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_flags_to_verbs(txq, buf,
+ (uint8_t *)&swp_offsets,
+ &swp_types);
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
@@ -445,69 +523,37 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
raw += MLX5_WQE_DWORD_SIZE;
tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
if (tso) {
- uintptr_t end =
- (uintptr_t)(((uintptr_t)txq->wqes) +
- (1 << txq->wqe_n) * MLX5_WQE_SIZE);
- unsigned int copy_b;
- uint8_t vlan_sz =
- (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
- const uint64_t is_tunneled =
- buf->ol_flags & (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- tso_header_sz = buf->l2_len + vlan_sz +
- buf->l3_len + buf->l4_len;
- tso_segsz = buf->tso_segsz;
- if (unlikely(tso_segsz == 0)) {
- txq->stats.oerrors++;
- break;
- }
- if (is_tunneled && txq->tunnel_en) {
- tso_header_sz += buf->outer_l2_len +
- buf->outer_l3_len;
- cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
- } else {
- cs_flags |= MLX5_ETH_WQE_L4_CSUM;
- }
- if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
- txq->stats.oerrors++;
- break;
- }
- copy_b = tso_header_sz - pkt_inline_sz;
- /* First seg must contain all headers. */
- assert(copy_b <= length);
- if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
- uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+ int ret;
- if (unlikely(max_wqe < n))
- break;
- max_wqe -= n;
- rte_memcpy((void *)raw, (void *)addr, copy_b);
- addr += copy_b;
- length -= copy_b;
- /* Include padding for TSO header. */
- copy_b = MLX5_WQE_DS(copy_b) *
- MLX5_WQE_DWORD_SIZE;
- pkt_inline_sz += copy_b;
- raw += copy_b;
- } else {
+ ret = process_tso(txq, buf, &raw, &max_wqe);
+ if (ret == -EINVAL) {
+ break;
+ } else if (ret == -ENOMEM) {
/* NOP WQE. */
wqe->ctrl = (rte_v128u32_t){
- rte_cpu_to_be_32(txq->wqe_ci << 8),
- rte_cpu_to_be_32(txq->qp_num_8s | 1),
- 0,
- 0,
+ rte_cpu_to_be_32(txq->wqe_ci << 8),
+ rte_cpu_to_be_32(txq->qp_num_8s | 1),
+ 0,
+ 0,
};
ds = 1;
-#ifdef MLX5_PMD_SOFT_COUNTERS
total_length = 0;
-#endif
k++;
goto next_wqe;
+ } else {
+ tso_segsz = buf->tso_segsz;
+ if (unlikely(tso_segsz == 0)) {
+ txq->stats.oerrors++;
+ break;
+ }
+ addr += ret;
+ length -= ret;
+ pkt_inline_sz += ret;
+ tso_header_sz = pkt_inline_sz;
}
}
/* Inline if enough room. */
- if (max_inline || tso) {
+ if (max_inline || unlikely(tso)) {
uint32_t inl = 0;
uintptr_t end = (uintptr_t)
(((uintptr_t)txq->wqes) +
@@ -652,7 +698,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
++i;
j += sg;
/* Initialize known and common part of the WQE structure. */
- if (tso) {
+ if (unlikely(tso)) {
wqe->ctrl = (rte_v128u32_t){
rte_cpu_to_be_32((txq->wqe_ci << 8) |
MLX5_OPCODE_TSO),
@@ -661,8 +707,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
0,
};
wqe->eseg = (rte_v128u32_t){
- 0,
- cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
+ swp_offsets,
+ cs_flags | (swp_types << 8) |
+ (rte_cpu_to_be_16(tso_segsz) << 16),
0,
(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
};
@@ -675,8 +722,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
0,
};
wqe->eseg = (rte_v128u32_t){
- 0,
- cs_flags,
+ swp_offsets,
+ cs_flags | (swp_types << 8),
0,
(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
};
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 4ade8bee1..852594708 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -196,6 +196,7 @@ struct mlx5_txq_data {
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
uint16_t tunnel_en:1;
/* When set TX offload for tunneled packets are supported. */
+ uint16_t swp_en:1; /* When set software parser is supported. */
uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
@@ -623,40 +624,99 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
}
/**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf tx offloads info to Verbs.
*
* @param txq_data
* Pointer to the Tx queue.
* @param buf
* Pointer to the mbuf.
+ * @param offsets
+ * Pointer to the header offsets.
+ * @param swp_types
+ * Pointer to the swp types.
*
* @return
* the converted cs_flags.
*/
static __rte_always_inline uint8_t
-txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+txq_ol_flags_to_verbs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf,
+ uint8_t *offsets, uint8_t *swp_types)
{
uint8_t cs_flags = 0;
-
- /* Should we enable HW CKSUM offload */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- if (txq_data->tunnel_en &&
- (buf->ol_flags &
- (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
- cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
+ uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+ const uint8_t tunnel = txq_data->tunnel_en &&
+ (buf->ol_flags & PKT_TX_TUNNEL_MASK);
+ const uint8_t tso = txq_data->tso_en &&
+ (buf->ol_flags & PKT_TX_TCP_SEG);
+ uint16_t off = buf->outer_l2_len + vlan_sz;
+
+ if (likely(!tso && !(buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
+ PKT_TX_OUTER_IP_CKSUM))))
+ return cs_flags;
+ if (likely(!tunnel)) {
+ if (buf->ol_flags & PKT_TX_IP_CKSUM)
+ cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (tso || (buf->ol_flags & PKT_TX_L4_MASK))
+ cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ return cs_flags;
+ }
+ /* Tunneled packets */
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (buf->ol_flags & PKT_TX_IP_CKSUM)
+ cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (tso || (buf->ol_flags & PKT_TX_L4_MASK))
+ cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (!txq_data->swp_en) /* HW offloading, only set csum flags */
+ return cs_flags;
+ /* SW parser enabled */
+ if (tso || (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)) {
+ offsets[1] = off >> 1; /* Outer L3 offset */
+ if (buf->ol_flags & PKT_TX_OUTER_IPV6)
+ *swp_types |= MLX5_ETH_OUTER_L3_IPV6;
+ }
+ off += buf->outer_l3_len;
+ /* TODO is outer L4 required? */
+ if (tso && (buf->ol_flags & PKT_TX_TUNNEL_VXLAN)) {
+ offsets[0] = off >> 1; /* Outer L4 offset */
+ *swp_types |= MLX5_ETH_OUTER_L4_UDP;
+ }
+ off += buf->l2_len;
+ if (tso || (buf->ol_flags & PKT_TX_IP_CKSUM)) {
+ offsets[3] = off >> 1; /* Inner L3 offset */
+ if (buf->ol_flags & PKT_TX_IPV6)
+ *swp_types |= MLX5_ETH_INNER_L3_IPV6;
+ }
+ if (tso || (buf->ol_flags & PKT_TX_L4_MASK)) {
+ off += buf->l3_len;
+ offsets[2] = off >> 1; /* Inner L4 offset */
+ if ((buf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
+ *swp_types |= MLX5_ETH_INNER_L4_UDP;
}
return cs_flags;
}
/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param txq_data
+ * Pointer to the Tx queue.
+ * @param buf
+ * Pointer to the mbuf.
+ *
+ * @return
+ * the converted cs_flags.
+ */
+static __rte_always_inline uint8_t
+txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+{
+ uint32_t offsets;
+ uint8_t swp_types;
+ return txq_ol_flags_to_verbs(txq_data, buf, (uint8_t *)&offsets, &swp_types);
+}
+
+/**
* Count the number of contiguous single segment packets.
*
* @param pkts
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index b81c85fed..bd7ba0834 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -729,6 +729,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
txq_ctrl->txq.tso_en = 1;
}
txq_ctrl->txq.tunnel_en = config->tunnel_en;
+ txq_ctrl->txq.swp_en = 1;
}
/**
--
2.13.3
next prev parent reply other threads:[~2018-01-09 15:00 UTC|newest]
Thread overview: 80+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-01-09 14:11 [dpdk-dev] [PATCH 0/6] Support generic tunnel TX csum and TSO Xueming Li
2018-01-09 14:11 ` Xueming Li [this message]
2018-01-29 15:08 ` [dpdk-dev] [PATCH v2 1/5] ethdev: introduce Tx generic tunnel offloads Xueming Li
2018-01-29 16:49 ` Ananyev, Konstantin
2018-01-30 3:01 ` Xueming(Steven) Li
2018-01-30 13:28 ` Ananyev, Konstantin
2018-01-30 15:27 ` Xueming(Steven) Li
2018-01-30 15:33 ` Ananyev, Konstantin
2018-01-30 15:47 ` Xueming(Steven) Li
2018-01-30 16:02 ` Ananyev, Konstantin
2018-01-30 16:10 ` Xueming(Steven) Li
2018-01-30 17:04 ` Ananyev, Konstantin
2018-01-30 17:54 ` Xueming(Steven) Li
2018-01-30 20:21 ` Thomas Monjalon
2018-01-31 15:20 ` Xueming(Steven) Li
2018-01-31 15:17 ` Xueming(Steven) Li
2018-01-29 15:08 ` [dpdk-dev] [PATCH v2 2/5] app/testpmd: testpmd support " Xueming Li
2018-01-29 15:08 ` [dpdk-dev] [PATCH v2 3/5] net/mlx5: separate TSO function in Tx data path Xueming Li
2018-01-29 15:08 ` [dpdk-dev] [PATCH v2 4/5] net/mlx5: support generic tunnel offloading Xueming Li
2018-01-29 15:08 ` [dpdk-dev] [PATCH v2 5/5] net/mlx5: allow max 192B TSO inline header length Xueming Li
2018-03-05 14:51 ` [dpdk-dev] [PATCH v3 0/7] support generic tunnel Tx checksum and TSO Xueming Li
2018-03-05 14:51 ` [dpdk-dev] [PATCH v3 1/7] ethdev: introduce Tx generic tunnel L3/L4 offload Xueming Li
2018-03-21 1:40 ` Yongseok Koh
2018-03-22 13:55 ` Xueming(Steven) Li
2018-03-28 12:52 ` Olivier Matz
2018-04-04 8:20 ` Xueming(Steven) Li
2018-03-05 14:51 ` [dpdk-dev] [PATCH v3 2/7] app/testpmd: testpmd support Tx generic tunnel offloads Xueming Li
2018-03-05 14:51 ` [dpdk-dev] [PATCH v3 3/7] app/testpmd: add more GRE extension to csum engine Xueming Li
2018-03-05 14:51 ` [dpdk-dev] [PATCH v3 4/7] app/testpmd: introduce VXLAN GPE to csum forwarding engine Xueming Li
2018-03-05 14:51 ` [dpdk-dev] [PATCH v3 5/7] net/mlx5: separate TSO function in Tx data path Xueming Li
2018-03-05 14:51 ` [dpdk-dev] [PATCH v3 6/7] net/mlx5: support generic tunnel offloading Xueming Li
2018-03-05 14:51 ` [dpdk-dev] [PATCH v3 7/7] net/mlx5: allow max 192B TSO inline header length Xueming Li
2018-04-08 12:32 ` [dpdk-dev] [PATCH v4 0/4] support Tx generic tunnel checksum and TSO Xueming Li
2018-04-17 14:43 ` [dpdk-dev] [PATCH v5 0/2] " Xueming Li
2018-04-17 14:47 ` [dpdk-dev] [PATCH v5 1/2] ethdev: introduce generic IP/UDP " Xueming Li
2018-04-17 21:21 ` Thomas Monjalon
2018-04-17 14:49 ` [dpdk-dev] [PATCH v5 2/2] app/testpmd: testpmd support Tx generic tunnel offloads Xueming Li
2018-04-18 13:38 ` [dpdk-dev] [PATCH v6 0/2] support Tx generic tunnel checksum and TSO Xueming Li
2018-04-18 13:58 ` [dpdk-dev] [PATCH v6 1/2] ethdev: introduce generic IP/UDP " Xueming Li
2018-04-18 14:28 ` Thomas Monjalon
2018-04-18 16:45 ` Ananyev, Konstantin
2018-04-18 18:02 ` Thomas Monjalon
2018-04-23 9:55 ` Olivier Matz
2018-04-20 12:48 ` [dpdk-dev] [PATCH v7 0/2] support Tx generic " Xueming Li
2018-04-23 11:36 ` [dpdk-dev] [PATCH v8 " Xueming Li
2018-04-23 16:17 ` Ferruh Yigit
2018-04-23 11:36 ` [dpdk-dev] [PATCH v8 1/2] ethdev: introduce generic IP/UDP " Xueming Li
2018-04-23 11:49 ` Xueming Li
2018-04-23 11:36 ` [dpdk-dev] [PATCH v8 2/2] app/testpmd: testpmd support Tx generic tunnel offloads Xueming Li
2018-04-20 12:48 ` [dpdk-dev] [PATCH v7 1/2] ethdev: introduce generic IP/UDP tunnel checksum and TSO Xueming Li
2018-04-23 9:59 ` Olivier Matz
2018-04-20 12:48 ` [dpdk-dev] [PATCH v7 2/2] app/testpmd: testpmd support Tx generic tunnel offloads Xueming Li
2018-04-18 13:59 ` [dpdk-dev] [PATCH v6 " Xueming Li
2018-04-08 12:32 ` [dpdk-dev] [PATCH v4 1/4] ethdev: introduce generic IP/UDP tunnel checksum and TSO Xueming Li
2018-04-16 22:42 ` Thomas Monjalon
2018-04-17 7:53 ` Xueming(Steven) Li
2018-04-17 8:10 ` Thomas Monjalon
2018-04-08 12:32 ` [dpdk-dev] [PATCH v4 2/4] app/testpmd: testpmd support Tx generic tunnel offloads Xueming Li
2018-04-17 14:24 ` Iremonger, Bernard
2018-04-17 15:44 ` Xueming(Steven) Li
2018-04-08 12:32 ` [dpdk-dev] [PATCH v4 3/4] app/testpmd: add more GRE extension to csum engine Xueming Li
2018-04-16 22:45 ` Thomas Monjalon
2018-04-17 5:19 ` Xueming(Steven) Li
2018-04-08 12:32 ` [dpdk-dev] [PATCH v4 4/4] app/testpmd: introduce VXLAN GPE to csum forwarding engine Xueming Li
2018-04-16 22:46 ` Thomas Monjalon
2018-04-17 13:56 ` Iremonger, Bernard
2018-04-17 14:12 ` Xueming(Steven) Li
2018-01-09 14:11 ` [dpdk-dev] [PATCH 2/6] net/mlx5: allow max 192B WQE TSO inline header length Xueming Li
2018-01-09 14:11 ` [dpdk-dev] [PATCH 3/6] net/mlx5: add SWP PCI parameter for TX common tunnel offloads Xueming Li
2018-01-09 14:11 ` [dpdk-dev] [PATCH 4/6] ethdev: introduce " Xueming Li
2018-01-11 18:38 ` Ferruh Yigit
2018-01-16 17:10 ` Olivier Matz
2018-01-16 17:28 ` Xueming(Steven) Li
2018-01-16 19:06 ` Shahaf Shuler
2018-01-22 12:46 ` Olivier Matz
2018-01-22 20:06 ` Shahaf Shuler
2018-01-17 0:50 ` Yongseok Koh
2018-01-09 14:11 ` [dpdk-dev] [PATCH 5/6] net/mlx5: support " Xueming Li
2018-01-09 14:11 ` [dpdk-dev] [PATCH 6/6] app/testpmd: testpmd " Xueming Li
2018-01-16 3:09 ` Lu, Wenzhuo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180109141110.146250-2-xuemingl@mellanox.com \
--to=xuemingl@mellanox.com \
--cc=dev@dpdk.org \
--cc=jingjing.wu@intel.com \
--cc=olivier.matz@6wind.com \
--cc=shahafs@mellanox.com \
--cc=thomas@monjalon.net \
--cc=yskoh@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).