DPDK patches and discussions
From: Ori Kam <orika@mellanox.com>
To: arybchenko@solarflare.com, ferruh.yigit@intel.com,
	stephen@networkplumber.org, adrien.mazarguil@6wind.com
Cc: dev@dpdk.org, dekelp@mellanox.com, thomas@monjalon.net,
	nelio.laranjeiro@6wind.com, yskoh@mellanox.com,
	orika@mellanox.com
Subject: [dpdk-dev] [PATCH 2/3] ethdev: convert testpmd encap commands to new API
Date: Sun, 16 Sep 2018 16:53:43 +0000	[thread overview]
Message-ID: <1537116824-191205-3-git-send-email-orika@mellanox.com> (raw)
In-Reply-To: <1537116824-191205-1-git-send-email-orika@mellanox.com>

Currently testpmd has two encapsulation commands, one for VXLAN and one
for NVGRE; both of them use the old per-tunnel rte_flow encap actions.

This commit updates both commands to work with the new tunnel encap
action.

Separate commands, one for VXLAN and one for NVGRE, are kept for ease
of use in testpmd; both commands now use the same rte_flow action for
tunnel encap.
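
For reference, a minimal sketch of how an application could build the
raw header buffer for the proposed action outside of testpmd, assuming
the buf/size layout of struct rte_flow_action_tunnel_encap introduced
in patch 1/3; the helper name and the fixed ETH/IPv4/UDP/VXLAN stack
are only illustrative:

#include <string.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_flow.h>

#define IP_PROTO_UDP 0x11
#define VXLAN_DEFAULT_PORT 4789

/* Hypothetical helper: flatten ETH/IPv4/UDP/VXLAN headers into buf and
 * attach them to the proposed tunnel encap action configuration.
 */
static void
fill_vxlan_encap(struct rte_flow_action_tunnel_encap *conf, uint8_t *buf)
{
	struct rte_flow_item_eth eth = {
		.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4),
	};
	struct rte_flow_item_ipv4 ipv4 = {
		.hdr = { .next_proto_id = IP_PROTO_UDP },
	};
	struct rte_flow_item_udp udp = {
		.hdr = { .dst_port = rte_cpu_to_be_16(VXLAN_DEFAULT_PORT) },
	};
	struct rte_flow_item_vxlan vxlan = { .flags = 0, };
	uint8_t *header = buf;

	memcpy(header, &eth, sizeof(eth));
	header += sizeof(eth);
	memcpy(header, &ipv4, sizeof(ipv4));
	header += sizeof(ipv4);
	memcpy(header, &udp, sizeof(udp));
	header += sizeof(udp);
	memcpy(header, &vxlan, sizeof(vxlan));
	header += sizeof(vxlan);
	conf->buf = buf;
	conf->size = header - buf;
}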

Signed-off-by: Ori Kam <orika@mellanox.com>
---
 app/test-pmd/cmdline_flow.c | 294 +++++++++++++++++++++-----------------------
 1 file changed, 137 insertions(+), 157 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index f926060..349e822 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -262,37 +262,13 @@ struct action_rss_data {
 	uint16_t queue[ACTION_RSS_QUEUE_NUM];
 };
 
-/** Maximum number of items in struct rte_flow_action_vxlan_encap. */
-#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
-
-/** Storage for struct rte_flow_action_vxlan_encap including external data. */
-struct action_vxlan_encap_data {
-	struct rte_flow_action_vxlan_encap conf;
-	struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
-	struct rte_flow_item_eth item_eth;
-	struct rte_flow_item_vlan item_vlan;
-	union {
-		struct rte_flow_item_ipv4 item_ipv4;
-		struct rte_flow_item_ipv6 item_ipv6;
-	};
-	struct rte_flow_item_udp item_udp;
-	struct rte_flow_item_vxlan item_vxlan;
-};
+/** Maximum buffer size for the encap data. */
+#define ACTION_TUNNEL_ENCAP_MAX_BUFFER_SIZE 64
 
-/** Maximum number of items in struct rte_flow_action_nvgre_encap. */
-#define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
-
-/** Storage for struct rte_flow_action_nvgre_encap including external data. */
-struct action_nvgre_encap_data {
-	struct rte_flow_action_nvgre_encap conf;
-	struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
-	struct rte_flow_item_eth item_eth;
-	struct rte_flow_item_vlan item_vlan;
-	union {
-		struct rte_flow_item_ipv4 item_ipv4;
-		struct rte_flow_item_ipv6 item_ipv6;
-	};
-	struct rte_flow_item_nvgre item_nvgre;
+/** Storage for struct rte_flow_action_tunnel_encap including external data. */
+struct action_tunnel_encap_data {
+	struct rte_flow_action_tunnel_encap conf;
+	uint8_t buf[ACTION_TUNNEL_ENCAP_MAX_BUFFER_SIZE];
 };
 
 /** Maximum number of subsequent tokens and arguments on the stack. */
@@ -2438,8 +2414,8 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 		.name = "vxlan_encap",
 		.help = "VXLAN encapsulation, uses configuration set by \"set"
 			" vxlan\"",
-		.priv = PRIV_ACTION(VXLAN_ENCAP,
-				    sizeof(struct action_vxlan_encap_data)),
+		.priv = PRIV_ACTION(TUNNEL_ENCAP,
+				    sizeof(struct action_tunnel_encap_data)),
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc_action_vxlan_encap,
 	},
@@ -2448,7 +2424,7 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 		.help = "Performs a decapsulation action by stripping all"
 			" headers of the VXLAN tunnel network overlay from the"
 			" matched flow.",
-		.priv = PRIV_ACTION(VXLAN_DECAP, 0),
+		.priv = PRIV_ACTION(TUNNEL_DECAP, 0),
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc,
 	},
@@ -2456,8 +2432,8 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 		.name = "nvgre_encap",
 		.help = "NVGRE encapsulation, uses configuration set by \"set"
 			" nvgre\"",
-		.priv = PRIV_ACTION(NVGRE_ENCAP,
-				    sizeof(struct action_nvgre_encap_data)),
+		.priv = PRIV_ACTION(TUNNEL_ENCAP,
+				    sizeof(struct action_tunnel_encap_data)),
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc_action_nvgre_encap,
 	},
@@ -2466,7 +2442,7 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 		.help = "Performs a decapsulation action by stripping all"
 			" headers of the NVGRE tunnel network overlay from the"
 			" matched flow.",
-		.priv = PRIV_ACTION(NVGRE_DECAP, 0),
+		.priv = PRIV_ACTION(TUNNEL_DECAP, 0),
 		.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
 		.call = parse_vc,
 	},
@@ -3034,6 +3010,9 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 	return len;
 }
 
+/** IP next protocol UDP. */
+#define IP_PROTO_UDP 0x11
+
 /** Parse VXLAN encap action. */
 static int
 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
@@ -3042,7 +3021,32 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 {
 	struct buffer *out = buf;
 	struct rte_flow_action *action;
-	struct action_vxlan_encap_data *action_vxlan_encap_data;
+	struct action_tunnel_encap_data *action_vxlan_encap_data;
+	struct rte_flow_item_eth eth = { .type = 0, };
+	struct rte_flow_item_vlan vlan = {
+		.tci = vxlan_encap_conf.vlan_tci,
+		.inner_type = 0,
+	};
+	struct rte_flow_item_ipv4 ipv4 = {
+		.hdr =  {
+			.src_addr = vxlan_encap_conf.ipv4_src,
+			.dst_addr = vxlan_encap_conf.ipv4_dst,
+			.next_proto_id = IP_PROTO_UDP,
+		},
+	};
+	struct rte_flow_item_ipv6 ipv6 = {
+		.hdr =  {
+			.proto = IP_PROTO_UDP,
+		},
+	};
+	struct rte_flow_item_udp udp = {
+		.hdr = {
+			.src_port = vxlan_encap_conf.udp_src,
+			.dst_port = vxlan_encap_conf.udp_dst,
+		},
+	};
+	struct rte_flow_item_vxlan vxlan = { .flags = 0, };
+	uint8_t *header;
 	int ret;
 
 	ret = parse_vc(ctx, token, str, len, buf, size);
@@ -3057,83 +3061,58 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 	/* Point to selected object. */
 	ctx->object = out->args.vc.data;
 	ctx->objmask = NULL;
-	/* Set up default configuration. */
+	/* Copy the headers to the buffer. */
 	action_vxlan_encap_data = ctx->object;
-	*action_vxlan_encap_data = (struct action_vxlan_encap_data){
-		.conf = (struct rte_flow_action_vxlan_encap){
-			.definition = action_vxlan_encap_data->items,
-		},
-		.items = {
-			{
-				.type = RTE_FLOW_ITEM_TYPE_ETH,
-				.spec = &action_vxlan_encap_data->item_eth,
-				.mask = &rte_flow_item_eth_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_VLAN,
-				.spec = &action_vxlan_encap_data->item_vlan,
-				.mask = &rte_flow_item_vlan_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_IPV4,
-				.spec = &action_vxlan_encap_data->item_ipv4,
-				.mask = &rte_flow_item_ipv4_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_UDP,
-				.spec = &action_vxlan_encap_data->item_udp,
-				.mask = &rte_flow_item_udp_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_VXLAN,
-				.spec = &action_vxlan_encap_data->item_vxlan,
-				.mask = &rte_flow_item_vxlan_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_END,
-			},
-		},
-		.item_eth.type = 0,
-		.item_vlan = {
-			.tci = vxlan_encap_conf.vlan_tci,
-			.inner_type = 0,
-		},
-		.item_ipv4.hdr = {
-			.src_addr = vxlan_encap_conf.ipv4_src,
-			.dst_addr = vxlan_encap_conf.ipv4_dst,
+	*action_vxlan_encap_data = (struct action_tunnel_encap_data) {
+		.conf = (struct rte_flow_action_tunnel_encap){
+			.buf = action_vxlan_encap_data->buf,
 		},
-		.item_udp.hdr = {
-			.src_port = vxlan_encap_conf.udp_src,
-			.dst_port = vxlan_encap_conf.udp_dst,
-		},
-		.item_vxlan.flags = 0,
+		.buf = {},
 	};
-	memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
+	header = action_vxlan_encap_data->buf;
+	if (vxlan_encap_conf.select_vlan)
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+	else if (vxlan_encap_conf.select_ipv4)
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+	else
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+	memcpy(eth.dst.addr_bytes,
 	       vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
-	memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
+	memcpy(eth.src.addr_bytes,
 	       vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
-	if (!vxlan_encap_conf.select_ipv4) {
-		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
+	memcpy(header, &eth, sizeof(eth));
+	header += sizeof(eth);
+	if (vxlan_encap_conf.select_vlan) {
+		if (vxlan_encap_conf.select_ipv4)
+			vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		else
+			vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		memcpy(header, &vlan, sizeof(vlan));
+		header += sizeof(vlan);
+	}
+	if (vxlan_encap_conf.select_ipv4) {
+		memcpy(header, &ipv4, sizeof(ipv4));
+		header += sizeof(ipv4);
+	} else {
+		memcpy(&ipv6.hdr.src_addr,
 		       &vxlan_encap_conf.ipv6_src,
 		       sizeof(vxlan_encap_conf.ipv6_src));
-		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
+		memcpy(&ipv6.hdr.dst_addr,
 		       &vxlan_encap_conf.ipv6_dst,
 		       sizeof(vxlan_encap_conf.ipv6_dst));
-		action_vxlan_encap_data->items[2] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &action_vxlan_encap_data->item_ipv6,
-			.mask = &rte_flow_item_ipv6_mask,
-		};
+		memcpy(header, &ipv6, sizeof(ipv6));
+		header += sizeof(ipv6);
 	}
-	if (!vxlan_encap_conf.select_vlan)
-		action_vxlan_encap_data->items[1].type =
-			RTE_FLOW_ITEM_TYPE_VOID;
-	memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
-	       RTE_DIM(vxlan_encap_conf.vni));
+	memcpy(header, &udp, sizeof(udp));
+	header += sizeof(udp);
+	memcpy(vxlan.vni, vxlan_encap_conf.vni, RTE_DIM(vxlan_encap_conf.vni));
+	memcpy(header, &vxlan, sizeof(vxlan));
+	header += sizeof(vxlan);
+	action_vxlan_encap_data->conf.size = header -
+		action_vxlan_encap_data->buf;
 	action->conf = &action_vxlan_encap_data->conf;
 	return ret;
 }
-
 /** Parse NVGRE encap action. */
 static int
 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
@@ -3142,7 +3121,26 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 {
 	struct buffer *out = buf;
 	struct rte_flow_action *action;
-	struct action_nvgre_encap_data *action_nvgre_encap_data;
+	struct action_tunnel_encap_data *action_nvgre_encap_data;
+	struct rte_flow_item_eth eth = { .type = 0, };
+	struct rte_flow_item_vlan vlan = {
+		.tci = nvgre_encap_conf.vlan_tci,
+		.inner_type = 0,
+	};
+	struct rte_flow_item_ipv4 ipv4 = {
+		.hdr =  {
+			.src_addr = nvgre_encap_conf.ipv4_src,
+			.dst_addr = nvgre_encap_conf.ipv4_dst,
+			.next_proto_id = IP_PROTO_UDP,
+		},
+	};
+	struct rte_flow_item_ipv6 ipv6 = {
+		.hdr =  {
+			.proto = IP_PROTO_UDP,
+		},
+	};
+	struct rte_flow_item_nvgre nvgre = { .flow_id = 0, };
+	uint8_t *header;
 	int ret;
 
 	ret = parse_vc(ctx, token, str, len, buf, size);
@@ -3157,74 +3155,56 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
 	/* Point to selected object. */
 	ctx->object = out->args.vc.data;
 	ctx->objmask = NULL;
-	/* Set up default configuration. */
+	/* Copy the headers to the buffer. */
 	action_nvgre_encap_data = ctx->object;
-	*action_nvgre_encap_data = (struct action_nvgre_encap_data){
-		.conf = (struct rte_flow_action_nvgre_encap){
-			.definition = action_nvgre_encap_data->items,
-		},
-		.items = {
-			{
-				.type = RTE_FLOW_ITEM_TYPE_ETH,
-				.spec = &action_nvgre_encap_data->item_eth,
-				.mask = &rte_flow_item_eth_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_VLAN,
-				.spec = &action_nvgre_encap_data->item_vlan,
-				.mask = &rte_flow_item_vlan_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_IPV4,
-				.spec = &action_nvgre_encap_data->item_ipv4,
-				.mask = &rte_flow_item_ipv4_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_NVGRE,
-				.spec = &action_nvgre_encap_data->item_nvgre,
-				.mask = &rte_flow_item_nvgre_mask,
-			},
-			{
-				.type = RTE_FLOW_ITEM_TYPE_END,
-			},
-		},
-		.item_eth.type = 0,
-		.item_vlan = {
-			.tci = nvgre_encap_conf.vlan_tci,
-			.inner_type = 0,
-		},
-		.item_ipv4.hdr = {
-		       .src_addr = nvgre_encap_conf.ipv4_src,
-		       .dst_addr = nvgre_encap_conf.ipv4_dst,
+	*action_nvgre_encap_data = (struct action_tunnel_encap_data) {
+		.conf = (struct rte_flow_action_tunnel_encap){
+			.buf = action_nvgre_encap_data->buf,
 		},
-		.item_nvgre.flow_id = 0,
+		.buf = {},
 	};
-	memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
+	header = action_nvgre_encap_data->buf;
+	if (nvgre_encap_conf.select_vlan)
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+	else if (nvgre_encap_conf.select_ipv4)
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+	else
+		eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+	memcpy(eth.dst.addr_bytes,
 	       nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
-	memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
+	memcpy(eth.src.addr_bytes,
 	       nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
-	if (!nvgre_encap_conf.select_ipv4) {
-		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
+	memcpy(header, &eth, sizeof(eth));
+	header += sizeof(eth);
+	if (nvgre_encap_conf.select_vlan) {
+		if (nvgre_encap_conf.select_ipv4)
+			vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		else
+			vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		memcpy(header, &vlan, sizeof(vlan));
+		header += sizeof(vlan);
+	}
+	if (nvgre_encap_conf.select_ipv4) {
+		memcpy(header, &ipv4, sizeof(ipv4));
+		header += sizeof(ipv4);
+	} else {
+		memcpy(&ipv6.hdr.src_addr,
 		       &nvgre_encap_conf.ipv6_src,
 		       sizeof(nvgre_encap_conf.ipv6_src));
-		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
+		memcpy(&ipv6.hdr.dst_addr,
 		       &nvgre_encap_conf.ipv6_dst,
 		       sizeof(nvgre_encap_conf.ipv6_dst));
-		action_nvgre_encap_data->items[2] = (struct rte_flow_item){
-			.type = RTE_FLOW_ITEM_TYPE_IPV6,
-			.spec = &action_nvgre_encap_data->item_ipv6,
-			.mask = &rte_flow_item_ipv6_mask,
-		};
+		memcpy(header, &ipv6, sizeof(ipv6));
+		header += sizeof(ipv6);
 	}
-	if (!nvgre_encap_conf.select_vlan)
-		action_nvgre_encap_data->items[1].type =
-			RTE_FLOW_ITEM_TYPE_VOID;
-	memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
-	       RTE_DIM(nvgre_encap_conf.tni));
+	memcpy(nvgre.tni, nvgre_encap_conf.tni, RTE_DIM(nvgre_encap_conf.tni));
+	memcpy(header, &nvgre, sizeof(nvgre));
+	header += sizeof(nvgre);
+	action_nvgre_encap_data->conf.size = header -
+		action_nvgre_encap_data->buf;
 	action->conf = &action_nvgre_encap_data->conf;
 	return ret;
 }
-
 /** Parse tokens for destroy command. */
 static int
 parse_destroy(struct context *ctx, const struct token *token,
-- 
1.8.3.1

Thread overview: 53+ messages
2018-09-16 16:53 [dpdk-dev] [PATCH 0/3] add generic L2/L3 tunnel encapsulation actions Ori Kam
2018-09-16 16:53 ` [dpdk-dev] [PATCH 1/3] ethdev: " Ori Kam
2018-09-16 16:53 ` Ori Kam [this message]
2018-09-16 16:53 ` [dpdk-dev] [PATCH 3/3] ethdev: remove vxlan and nvgre encapsulation commands Ori Kam
2018-09-26 21:00 ` [dpdk-dev] [PATCH v2 0/3] ethdev: add generic L2/L3 tunnel encapsulation actions Ori Kam
2018-09-26 21:00   ` [dpdk-dev] [PATCH v2 1/3] " Ori Kam
2018-09-26 21:00   ` [dpdk-dev] [PATCH v2 2/3] app/testpmd: convert testpmd encap commands to new API Ori Kam
2018-09-26 21:00   ` [dpdk-dev] [PATCH v2 3/3] ethdev: remove vxlan and nvgre encapsulation commands Ori Kam
2018-10-05 12:59     ` Ferruh Yigit
2018-10-05 13:26       ` Awal, Mohammad Abdul
2018-10-05 13:27     ` Mohammad Abdul Awal
2018-10-03 20:38   ` [dpdk-dev] [PATCH v2 0/3] ethdev: add generic L2/L3 tunnel encapsulation actions Thomas Monjalon
2018-10-05 12:57   ` Ferruh Yigit
2018-10-05 14:00     ` Ori Kam
2018-10-07 12:57   ` [dpdk-dev] [PATCH v3 " Ori Kam
2018-10-07 12:57     ` [dpdk-dev] [PATCH v3 1/3] " Ori Kam
2018-10-07 12:57     ` [dpdk-dev] [PATCH v3 2/3] app/testpmd: convert testpmd encap commands to new API Ori Kam
2018-10-07 12:57     ` [dpdk-dev] [PATCH v3 3/3] ethdev: remove vxlan and nvgre encapsulation commands Ori Kam
2018-10-09 16:48     ` [dpdk-dev] [PATCH v3 0/3] ethdev: add generic L2/L3 tunnel encapsulation actions Ferruh Yigit
2018-10-10  6:45       ` Andrew Rybchenko
2018-10-10  9:00         ` Ori Kam
2018-10-10  9:30           ` Andrew Rybchenko
2018-10-10  9:38             ` Thomas Monjalon
2018-10-10 12:02           ` Adrien Mazarguil
2018-10-10 13:17             ` Ori Kam
2018-10-10 16:10               ` Adrien Mazarguil
2018-10-11  8:48                 ` Ori Kam
2018-10-11 13:12                   ` Adrien Mazarguil
2018-10-11 13:55                     ` Ori Kam
2018-10-16 21:40     ` [dpdk-dev] [PATCH v4 0/3] ethdev: add generic raw " Ori Kam
2018-10-16 21:41       ` [dpdk-dev] [PATCH v4 1/3] ethdev: add raw encapsulation action Ori Kam
2018-10-17  7:56         ` Andrew Rybchenko
2018-10-17  8:43           ` Ori Kam
2018-10-22 13:06             ` Andrew Rybchenko
2018-10-22 13:19               ` Ori Kam
2018-10-22 13:27                 ` Andrew Rybchenko
2018-10-22 13:32                   ` Ori Kam
2018-10-16 21:41       ` [dpdk-dev] [PATCH v4 2/3] app/testpmd: add MPLSoUDP encapsulation Ori Kam
2018-10-16 21:41       ` [dpdk-dev] [PATCH v4 3/3] app/testpmd: add MPLSoGRE encapsulation Ori Kam
2018-10-17 17:07       ` [dpdk-dev] [PATCH v5 0/3] ethdev: add generic raw tunnel encapsulation actions Ori Kam
2018-10-17 17:07         ` [dpdk-dev] [PATCH v5 1/3] ethdev: add raw encapsulation action Ori Kam
2018-10-22 14:15           ` Andrew Rybchenko
2018-10-22 14:31             ` Ori Kam
2018-10-17 17:07         ` [dpdk-dev] [PATCH v5 2/3] app/testpmd: add MPLSoUDP encapsulation Ori Kam
2018-10-17 17:07         ` [dpdk-dev] [PATCH v5 3/3] app/testpmd: add MPLSoGRE encapsulation Ori Kam
2018-10-22 14:45         ` [dpdk-dev] [PATCH v5 0/3] ethdev: add generic raw tunnel encapsulation actions Ferruh Yigit
2018-10-22 17:38         ` [dpdk-dev] [PATCH v6 0/3] ethdev: add generic raw tunnel encapsulation Ori Kam
2018-10-22 17:38           ` [dpdk-dev] [PATCH v6 1/3] ethdev: add raw encapsulation action Ori Kam
2018-10-22 17:38           ` [dpdk-dev] [PATCH v6 2/3] app/testpmd: add MPLSoUDP encapsulation Ori Kam
2018-10-23  9:55             ` Ferruh Yigit
2018-10-22 17:38           ` [dpdk-dev] [PATCH v6 3/3] app/testpmd: add MPLSoGRE encapsulation Ori Kam
2018-10-23  9:56             ` Ferruh Yigit
2018-10-23  9:56           ` [dpdk-dev] [PATCH v6 0/3] ethdev: add generic raw tunnel encapsulation Ferruh Yigit
