From: Ori Kam <orika@mellanox.com>
To: "wenzhuo.lu@intel.com" <wenzhuo.lu@intel.com>,
"jingjing.wu@intel.com" <jingjing.wu@intel.com>,
"bernard.iremonger@intel.com" <bernard.iremonger@intel.com>,
"arybchenko@solarflare.com" <arybchenko@solarflare.com>,
"ferruh.yigit@intel.com" <ferruh.yigit@intel.com>,
"stephen@networkplumber.org" <stephen@networkplumber.org>,
Adrien Mazarguil <adrien.mazarguil@6wind.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
"Dekel Peled" <dekelp@mellanox.com>,
"Thomas Monjalon" <thomas@monjalon.net>,
"Nélio Laranjeiro" <nelio.laranjeiro@6wind.com>,
"Yongseok Koh" <yskoh@mellanox.com>,
"Ori Kam" <orika@mellanox.com>,
"Shahaf Shuler" <shahafs@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 2/3] app/testpmd: add MPLSoGRE encapsulation
Date: Sun, 7 Oct 2018 14:41:55 +0000 [thread overview]
Message-ID: <1538923293-127516-3-git-send-email-orika@mellanox.com> (raw)
In-Reply-To: <1538923293-127516-1-git-send-email-orika@mellanox.com>
Due to the complex encapsulation of the MPLSoGRE flow action and based on
the fact that testpmd does not allocate memory, this patch adds a new command
in testpmd to initialise a global structure containing the necessary
information to build the outer layers of the packet. This same global
structure will then be used by the flow command line in testpmd when
the action mplsogre_encap is parsed; at this point, the conversion
into such an action becomes trivial.
Signed-off-by: Ori Kam <orika@mellanox.com>
---
app/test-pmd/cmdline.c | 153 ++++++++++++++++++++++++++++
app/test-pmd/cmdline_flow.c | 113 ++++++++++++++++++++
app/test-pmd/testpmd.h | 15 +++
doc/guides/testpmd_app_ug/testpmd_funcs.rst | 46 +++++++++
4 files changed, 327 insertions(+)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 492dc5d..357660b 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -15447,6 +15447,157 @@ static void cmd_set_mplsoudp_parsed(void *parsed_result,
},
};
+/** Set MPLSoGRE encapsulation details */
+struct cmd_set_mplsogre_result {
+ cmdline_fixed_string_t set; /* Literal "set". */
+ cmdline_fixed_string_t mplsogre; /* "mplsogre" or "mplsogre-with-vlan". */
+ cmdline_fixed_string_t pos_token; /* Positional keyword currently parsed. */
+ cmdline_fixed_string_t ip_version; /* "ipv4" or "ipv6". */
+ uint32_t vlan_present:1; /* NOTE(review): never written by the handler -- confirm or drop. */
+ uint32_t label; /* MPLS label; only the low 24 bits are used. */
+ cmdline_ipaddr_t ip_src; /* Outer source IP address. */
+ cmdline_ipaddr_t ip_dst; /* Outer destination IP address. */
+ uint16_t tci; /* VLAN TCI, host byte order on input. */
+ struct ether_addr eth_src; /* Outer source MAC address. */
+ struct ether_addr eth_dst; /* Outer destination MAC address. */
+};
+
+/* Parser tokens for the "set mplsogre" / "set mplsogre-with-vlan" commands. */
+cmdline_parse_token_string_t cmd_set_mplsogre_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, set, "set");
+cmdline_parse_token_string_t cmd_set_mplsogre_mplsogre =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, mplsogre,
+ "mplsogre");
+cmdline_parse_token_string_t cmd_set_mplsogre_mplsogre_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, mplsogre,
+ "mplsogre-with-vlan");
+cmdline_parse_token_string_t cmd_set_mplsogre_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, pos_token,
+ "ip-version");
+cmdline_parse_token_string_t cmd_set_mplsogre_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, ip_version,
+ "ipv4#ipv6");
+cmdline_parse_token_string_t cmd_set_mplsogre_label =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, pos_token,
+ "label");
+cmdline_parse_token_num_t cmd_set_mplsogre_label_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_mplsogre_result, label, UINT32);
+cmdline_parse_token_string_t cmd_set_mplsogre_ip_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, pos_token,
+ "ip-src");
+cmdline_parse_token_ipaddr_t cmd_set_mplsogre_ip_src_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsogre_result, ip_src);
+cmdline_parse_token_string_t cmd_set_mplsogre_ip_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, pos_token,
+ "ip-dst");
+cmdline_parse_token_ipaddr_t cmd_set_mplsogre_ip_dst_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsogre_result, ip_dst);
+cmdline_parse_token_string_t cmd_set_mplsogre_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, pos_token,
+ "vlan-tci");
+cmdline_parse_token_num_t cmd_set_mplsogre_vlan_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_mplsogre_result, tci, UINT16);
+cmdline_parse_token_string_t cmd_set_mplsogre_eth_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, pos_token,
+ "eth-src");
+cmdline_parse_token_etheraddr_t cmd_set_mplsogre_eth_src_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsogre_result, eth_src);
+cmdline_parse_token_string_t cmd_set_mplsogre_eth_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_result, pos_token,
+ "eth-dst");
+cmdline_parse_token_etheraddr_t cmd_set_mplsogre_eth_dst_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsogre_result, eth_dst);
+
+/*
+ * Handler shared by "set mplsogre ..." and "set mplsogre-with-vlan ...":
+ * stores the parsed outer-header fields into the global mplsogre_encap_conf
+ * that the "mplsogre_encap" flow action later reads.
+ */
+static void cmd_set_mplsogre_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_mplsogre_result *res = parsed_result;
+ /* Big-endian view of the label: bytes 1-3 of the 32-bit value hold
+  * the meaningful 24 bits after masking. */
+ union {
+ uint32_t mplsogre_label;
+ uint8_t label[3];
+ } id = {
+ .mplsogre_label =
+ rte_cpu_to_be_32(res->label) & RTE_BE32(0x00ffffff),
+ };
+
+ /* Which command variant was typed decides VLAN insertion. */
+ if (strcmp(res->mplsogre, "mplsogre") == 0)
+ mplsogre_encap_conf.select_vlan = 0;
+ else if (strcmp(res->mplsogre, "mplsogre-with-vlan") == 0)
+ mplsogre_encap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ mplsogre_encap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ mplsogre_encap_conf.select_ipv4 = 0;
+ else
+ return;
+ /* Copy only the 3 meaningful big-endian label bytes. */
+ rte_memcpy(mplsogre_encap_conf.label, &id.label[1], 3);
+ if (mplsogre_encap_conf.select_ipv4) {
+ IPV4_ADDR_TO_UINT(res->ip_src, mplsogre_encap_conf.ipv4_src);
+ IPV4_ADDR_TO_UINT(res->ip_dst, mplsogre_encap_conf.ipv4_dst);
+ } else {
+ IPV6_ADDR_TO_ARRAY(res->ip_src, mplsogre_encap_conf.ipv6_src);
+ IPV6_ADDR_TO_ARRAY(res->ip_dst, mplsogre_encap_conf.ipv6_dst);
+ }
+ /* TCI is stored in network byte order, ready for the header. */
+ if (mplsogre_encap_conf.select_vlan)
+ mplsogre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
+ rte_memcpy(mplsogre_encap_conf.eth_src, res->eth_src.addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(mplsogre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+ ETHER_ADDR_LEN);
+}
+
+/* "set mplsogre ..." command (no VLAN header in the outer encapsulation). */
+cmdline_parse_inst_t cmd_set_mplsogre = {
+ .f = cmd_set_mplsogre_parsed,
+ .data = NULL,
+ .help_str = "set mplsogre ip-version ipv4|ipv6 label <label>"
+ " ip-src <ip-src> ip-dst <ip-dst> eth-src <eth-src>"
+ " eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_mplsogre_set,
+ (void *)&cmd_set_mplsogre_mplsogre,
+ (void *)&cmd_set_mplsogre_ip_version,
+ (void *)&cmd_set_mplsogre_ip_version_value,
+ (void *)&cmd_set_mplsogre_label,
+ (void *)&cmd_set_mplsogre_label_value,
+ (void *)&cmd_set_mplsogre_ip_src,
+ (void *)&cmd_set_mplsogre_ip_src_value,
+ (void *)&cmd_set_mplsogre_ip_dst,
+ (void *)&cmd_set_mplsogre_ip_dst_value,
+ (void *)&cmd_set_mplsogre_eth_src,
+ (void *)&cmd_set_mplsogre_eth_src_value,
+ (void *)&cmd_set_mplsogre_eth_dst,
+ (void *)&cmd_set_mplsogre_eth_dst_value,
+ NULL,
+ },
+};
+
+/* "set mplsogre-with-vlan ..." command (adds a VLAN header, hence vlan-tci). */
+cmdline_parse_inst_t cmd_set_mplsogre_with_vlan = {
+ .f = cmd_set_mplsogre_parsed,
+ .data = NULL,
+ .help_str = "set mplsogre-with-vlan ip-version ipv4|ipv6 label <label>"
+ " ip-src <ip-src> ip-dst <ip-dst> vlan-tci <vlan-tci>"
+ " eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_mplsogre_set,
+ (void *)&cmd_set_mplsogre_mplsogre_with_vlan,
+ (void *)&cmd_set_mplsogre_ip_version,
+ (void *)&cmd_set_mplsogre_ip_version_value,
+ (void *)&cmd_set_mplsogre_label,
+ (void *)&cmd_set_mplsogre_label_value,
+ (void *)&cmd_set_mplsogre_ip_src,
+ (void *)&cmd_set_mplsogre_ip_src_value,
+ (void *)&cmd_set_mplsogre_ip_dst,
+ (void *)&cmd_set_mplsogre_ip_dst_value,
+ (void *)&cmd_set_mplsogre_vlan,
+ (void *)&cmd_set_mplsogre_vlan_value,
+ (void *)&cmd_set_mplsogre_eth_src,
+ (void *)&cmd_set_mplsogre_eth_src_value,
+ (void *)&cmd_set_mplsogre_eth_dst,
+ (void *)&cmd_set_mplsogre_eth_dst_value,
+ NULL,
+ },
+};
+
/* Strict link priority scheduling mode setting */
static void
cmd_strict_link_prio_parsed(
@@ -18078,6 +18229,8 @@ struct cmd_config_per_queue_tx_offload_result {
(cmdline_parse_inst_t *)&cmd_set_nvgre_with_vlan,
(cmdline_parse_inst_t *)&cmd_set_mplsoudp,
(cmdline_parse_inst_t *)&cmd_set_mplsoudp_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_mplsogre,
+ (cmdline_parse_inst_t *)&cmd_set_mplsogre_with_vlan,
(cmdline_parse_inst_t *)&cmd_ddp_add,
(cmdline_parse_inst_t *)&cmd_ddp_del,
(cmdline_parse_inst_t *)&cmd_ddp_get_list,
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 7e8de1d..9433823 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -244,6 +244,7 @@ enum index {
ACTION_NVGRE_ENCAP,
ACTION_NVGRE_DECAP,
ACTION_MPLSOUDP_ENCAP,
+ ACTION_MPLSOGRE_ENCAP,
};
/** Maximum size for pattern in struct rte_flow_item_raw. */
@@ -800,6 +801,7 @@ struct parse_action_priv {
ACTION_NVGRE_ENCAP,
ACTION_NVGRE_DECAP,
ACTION_MPLSOUDP_ENCAP,
+ ACTION_MPLSOGRE_ENCAP,
ZERO,
};
@@ -939,6 +941,9 @@ static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
static int parse_vc_action_mplsoudp_encap(struct context *,
const struct token *, const char *,
unsigned int, void *, unsigned int);
+static int parse_vc_action_mplsogre_encap(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
static int parse_destroy(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -2466,6 +2471,15 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
.call = parse_vc_action_mplsoudp_encap,
},
+ [ACTION_MPLSOGRE_ENCAP] = {
+ .name = "mplsogre_encap",
+ /* Help text fixed: the configuration for this action is set by
+  * "set mplsogre", not "set vxlan" (copy-paste leftover). */
+ .help = "mplsogre encapsulation, uses configuration set by"
+ " \"set mplsogre\"",
+ .priv = PRIV_ACTION(TUNNEL_ENCAP_L3,
+ sizeof(struct action_tunnel_encap_l3_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_mplsogre_encap,
+ },
};
/** Remove and return last entry from argument stack. */
@@ -3329,6 +3343,105 @@ static int comp_vc_action_rss_queue(struct context *, const struct token *,
return ret;
}
+/**
+ * Parse the "mplsogre_encap" flow action.
+ *
+ * Builds the raw outer-header buffer (Ethernet [/ VLAN] / IPv4|IPv6 /
+ * GRE / MPLS) from the global mplsogre_encap_conf previously filled by
+ * "set mplsogre(-with-vlan)" and attaches it to the last action of the
+ * flow rule being parsed. Returns parse_vc()'s result, or -1 when no
+ * action slot is available.
+ */
+static int
+parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ /* NOTE(review): PRIV_ACTION() above sizes this priv area as
+  * action_tunnel_encap_l3_data; confirm both layouts match or switch
+  * this pointer to the l3 type. */
+ struct action_tunnel_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsogre_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .src_addr = mplsogre_encap_conf.ipv4_src,
+ .dst_addr = mplsogre_encap_conf.ipv4_dst,
+ /* MPLSoGRE: the outer IP header carries GRE (47), not UDP. */
+ .next_proto_id = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_gre gre = {
+ /* GRE protocol field: MPLS unicast (0x8847), not multicast. */
+ .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS),
+ };
+ /* Zero-initialize so the TTL byte copied below is defined. */
+ struct rte_flow_item_mpls mpls = { .ttl = 0, };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_tunnel_encap_data) {
+ .conf = (struct rte_flow_action_tunnel_encap){
+ .buf = action_encap_data->buf,
+ },
+ .buf = {},
+ };
+ header = action_encap_data->buf;
+ /* Outer Ethernet type depends on VLAN presence and IP version. */
+ if (mplsogre_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsogre_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsogre_encap_conf.select_vlan) {
+ if (mplsogre_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsogre_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(&ipv6.hdr.src_addr,
+ &mplsogre_encap_conf.ipv6_src,
+ sizeof(mplsogre_encap_conf.ipv6_src));
+ memcpy(&ipv6.hdr.dst_addr,
+ &mplsogre_encap_conf.ipv6_dst,
+ sizeof(mplsogre_encap_conf.ipv6_dst));
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &gre, sizeof(gre));
+ header += sizeof(gre);
+ /* Label bytes were pre-converted to network order by "set mplsogre". */
+ memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
+ RTE_DIM(mplsogre_encap_conf.label));
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_encap_data->conf.size = header -
+ action_encap_data->buf;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
/** Parse tokens for destroy command. */
static int
parse_destroy(struct context *ctx, const struct token *token,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 57fc6d1..6d1b2c3 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -501,6 +501,21 @@ struct mplsoudp_encap_conf {
};
struct mplsoudp_encap_conf mplsoudp_encap_conf;
+/* MPLSoGRE encap parameters. */
+struct mplsogre_encap_conf {
+ uint32_t select_ipv4:1; /* Outer header is IPv4 (1) or IPv6 (0). */
+ uint32_t select_vlan:1; /* Insert a VLAN header when set. */
+ uint8_t label[3]; /* MPLS label bytes, network byte order. */
+ rte_be32_t ipv4_src; /* Outer IPv4 source address. */
+ rte_be32_t ipv4_dst; /* Outer IPv4 destination address. */
+ uint8_t ipv6_src[16]; /* Outer IPv6 source address. */
+ uint8_t ipv6_dst[16]; /* Outer IPv6 destination address. */
+ rte_be16_t vlan_tci; /* VLAN TCI, network byte order. */
+ uint8_t eth_src[ETHER_ADDR_LEN]; /* Outer source MAC address. */
+ uint8_t eth_dst[ETHER_ADDR_LEN]; /* Outer destination MAC address. */
+};
+struct mplsogre_encap_conf mplsogre_encap_conf;
+
static inline unsigned int
lcore_num(void)
{
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index e07a6f8..61280f1 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -1587,6 +1587,21 @@ flow rule using the action mplsoudp_encap will use the last configuration set.
To have a different encapsulation header, one of those commands must be called
before the flow rule creation.
+Config MPLSoGRE Encap outer layers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure the outer layer to encapsulate a packet inside a MPLSoGRE tunnel::
+
+ set mplsogre ip-version (ipv4|ipv6) label (label) ip-src (ip-src) \
+ ip-dst (ip-dst) eth-src (eth-src) eth-dst (eth-dst)
+ set mplsogre-with-vlan ip-version (ipv4|ipv6) label (label) ip-src (ip-src) \
+ ip-dst (ip-dst) vlan-tci (vlan-tci) eth-src (eth-src) eth-dst (eth-dst)
+
+These commands will set an internal configuration inside testpmd; any following
+flow rule using the action mplsogre_encap will use the last configuration set.
+To have a different encapsulation header, one of those commands must be called
+before the flow rule creation.
+
Port Functions
--------------
@@ -3724,6 +3739,9 @@ This section lists supported actions and their attributes, if any.
- ``mplsoudp_encap``: Performs a MPLSoUDP encapsulation, outer layer
configuration is done through `Config MPLSoUDP Encap outer layers`_.
+- ``mplsogre_encap``: Performs a MPLSoGRE encapsulation, outer layer
+ configuration is done through `Config MPLSoGRE Encap outer layers`_.
+
Destroying flow rules
~~~~~~~~~~~~~~~~~~~~~
@@ -4082,6 +4100,34 @@ IPv6 MPLSoUDP outer header::
eth-dst 22:22:22:22:22:22
testpmd> flow create 0 egress pattern end actions mplsoudp_encap / end
+Sample MPLSoGRE encapsulation rule
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+MPLSoGRE encapsulation outer layer has default values pre-configured in testpmd
+source code; those can be changed by using the following commands.
+
+IPv4 MPLSoGRE outer header::
+
+ testpmd> set mplsogre ip-version ipv4 label 4 ip-src 127.0.0.1 ip-dst 128.0.0.1
+ eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 egress pattern end actions mplsogre_encap / end
+
+ testpmd> set mplsogre-with-vlan ip-version ipv4 label 4 ip-src 127.0.0.1
+ ip-dst 128.0.0.1 vlan-tci 34 eth-src 11:11:11:11:11:11
+ eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 egress pattern end actions mplsogre_encap / end
+
+IPv6 MPLSoGRE outer header::
+
+ testpmd> set mplsogre ip-version ipv6 label 4 ip-src ::1 ip-dst ::2222
+ eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 egress pattern end actions mplsogre_encap / end
+
+ testpmd> set mplsogre-with-vlan ip-version ipv6 label 4 ip-src ::1
+ ip-dst ::2222 vlan-tci 34 eth-src 11:11:11:11:11:11
+ eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 egress pattern end actions mplsogre_encap / end
+
BPF Functions
--------------
--
1.8.3.1
next prev parent reply other threads:[~2018-10-07 14:41 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-09-29 20:07 [dpdk-dev] [PATCH 0/3] app/testpmd: add l3 encap/decap cmd Ori Kam
2018-09-29 20:07 ` [dpdk-dev] [PATCH 1/3] app/testpmd: add MPLSoUDP encapsulation Ori Kam
2018-09-29 20:07 ` [dpdk-dev] [PATCH 2/3] app/testpmd: add MPLSoGRE encapsulation Ori Kam
2018-09-29 20:07 ` [dpdk-dev] [PATCH 3/3] app/testpmd: add decap l3 command Ori Kam
2018-10-05 13:35 ` [dpdk-dev] [PATCH 0/3] app/testpmd: add l3 encap/decap cmd Ferruh Yigit
2018-10-05 14:00 ` Ori Kam
2018-10-07 14:41 ` [dpdk-dev] [PATCH v2 " Ori Kam
2018-10-07 14:41 ` [dpdk-dev] [PATCH v2 1/3] app/testpmd: add MPLSoUDP encapsulation Ori Kam
2018-10-07 14:41 ` Ori Kam [this message]
2018-10-07 14:41 ` [dpdk-dev] [PATCH v2 3/3] app/testpmd: add decap l3 command Ori Kam
2018-10-09 16:48 ` [dpdk-dev] [PATCH v2 0/3] app/testpmd: add l3 encap/decap cmd Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1538923293-127516-3-git-send-email-orika@mellanox.com \
--to=orika@mellanox.com \
--cc=adrien.mazarguil@6wind.com \
--cc=arybchenko@solarflare.com \
--cc=bernard.iremonger@intel.com \
--cc=dekelp@mellanox.com \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@intel.com \
--cc=jingjing.wu@intel.com \
--cc=nelio.laranjeiro@6wind.com \
--cc=shahafs@mellanox.com \
--cc=stephen@networkplumber.org \
--cc=thomas@monjalon.net \
--cc=wenzhuo.lu@intel.com \
--cc=yskoh@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).