From: Mohammad Abdul Awal <mohammad.abdul.awal@intel.com>
To: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>,
dev@dpdk.org, Adrien Mazarguil <adrien.mazarguil@6wind.com>,
Wenzhuo Lu <wenzhuo.lu@intel.com>,
Jingjing Wu <jingjing.wu@intel.com>,
Bernard Iremonger <bernard.iremonger@intel.com>
Subject: Re: [dpdk-dev] [PATCH v2 1/2] app/testpmd: add VXLAN encap/decap support
Date: Mon, 18 Jun 2018 13:47:55 +0100 [thread overview]
Message-ID: <43504c63-4fd8-c8fc-55fc-2f81549600b7@intel.com> (raw)
In-Reply-To: <9356cee477d0d3ea3984f3bafee032d92554a7e0.1529311722.git.nelio.laranjeiro@6wind.com>
Hi Nelio,
On 18/06/2018 09:52, Nelio Laranjeiro wrote:
> Due to the complex VXLAN_ENCAP flow action and based on the fact testpmd
> does not allocate memory, this patch adds a new command in testpmd to
> initialise a global structure containing the necessary information to
> make the outer layer of the packet. This same global structure will
> then be used by the flow command line in testpmd when the action
> vxlan_encap will be parsed, at this point, the conversion into such
> action becomes trivial.
>
> This global structure is only used for the encap action.
>
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> ---
> app/test-pmd/cmdline.c | 90 ++++++++++++++
> app/test-pmd/cmdline_flow.c | 129 ++++++++++++++++++++
> app/test-pmd/testpmd.c | 15 +++
> app/test-pmd/testpmd.h | 15 +++
> doc/guides/testpmd_app_ug/testpmd_funcs.rst | 12 ++
> 5 files changed, 261 insertions(+)
>
> diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
> index 27e2aa8c8..a3b98b2f2 100644
> --- a/app/test-pmd/cmdline.c
> +++ b/app/test-pmd/cmdline.c
> @@ -781,6 +781,10 @@ static void cmd_help_long_parsed(void *parsed_result,
> "port tm hierarchy commit (port_id) (clean_on_fail)\n"
> " Commit tm hierarchy.\n\n"
>
> + "vxlan ipv4|ipv6 vni udp-src udp-dst ip-src ip-dst"
> + " eth-src eth-dst\n"
> + " Configure the VXLAN encapsulation for flows.\n\n"
> +
Should there also be support for an outer VLAN header, according to the definitions?
> , list_pkt_forwarding_modes()
> );
> }
> @@ -14838,6 +14842,91 @@ cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = {
> };
> #endif
>
> +/** Set VXLAN encapsulation details */
> +struct cmd_set_vxlan_result {
> + cmdline_fixed_string_t set;
> + cmdline_fixed_string_t vxlan;
> + cmdline_fixed_string_t ip_version;
> + uint32_t vni;
> + uint16_t udp_src;
> + uint16_t udp_dst;
> + cmdline_ipaddr_t ip_src;
> + cmdline_ipaddr_t ip_dst;
> + struct ether_addr eth_src;
> + struct ether_addr eth_dst;
> +};
> +
> +cmdline_parse_token_string_t cmd_set_vxlan_set =
> + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, set, "set");
> +cmdline_parse_token_string_t cmd_set_vxlan_vxlan =
> + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan, "vxlan");
> +cmdline_parse_token_string_t cmd_set_vxlan_ip_version =
> + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, ip_version,
> + "ipv4#ipv6");
> +cmdline_parse_token_num_t cmd_set_vxlan_vni =
> + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, vni, UINT32);
> +cmdline_parse_token_num_t cmd_set_vxlan_udp_src =
> + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_src, UINT16);
> +cmdline_parse_token_num_t cmd_set_vxlan_udp_dst =
> + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_dst, UINT16);
> +cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_src =
> + TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_src);
> +cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_dst =
> + TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_dst);
> +cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_src =
> + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_src);
> +cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_dst =
> + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_dst);
> +
> +static void cmd_set_vxlan_parsed(void *parsed_result,
> + __attribute__((unused)) struct cmdline *cl,
> + __attribute__((unused)) void *data)
> +{
> + struct cmd_set_vxlan_result *res = parsed_result;
> + uint32_t vni = rte_cpu_to_be_32(res->vni) >> 8;
> +
> + if (strcmp(res->ip_version, "ipv4") == 0)
> + vxlan_encap_conf.select_ipv4 = 1;
> + else if (strcmp(res->ip_version, "ipv6") == 0)
> + vxlan_encap_conf.select_ipv4 = 0;
> + else
> + return;
> + memcpy(vxlan_encap_conf.vni, &vni, 3);
> + vxlan_encap_conf.udp_src = rte_cpu_to_be_16(res->udp_src);
> + vxlan_encap_conf.udp_dst = rte_cpu_to_be_16(res->udp_dst);
> + if (vxlan_encap_conf.select_ipv4) {
> + IPV4_ADDR_TO_UINT(res->ip_src, vxlan_encap_conf.ipv4_src);
> + IPV4_ADDR_TO_UINT(res->ip_dst, vxlan_encap_conf.ipv4_dst);
> + } else {
> + IPV6_ADDR_TO_ARRAY(res->ip_src, vxlan_encap_conf.ipv6_src);
> + IPV6_ADDR_TO_ARRAY(res->ip_dst, vxlan_encap_conf.ipv6_dst);
> + }
> + memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes,
> + ETHER_ADDR_LEN);
> + memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes,
> + ETHER_ADDR_LEN);
> +}
> +
> +cmdline_parse_inst_t cmd_set_vxlan = {
> + .f = cmd_set_vxlan_parsed,
> + .data = NULL,
> + .help_str = "set vxlan ipv4|ipv6 <vni> <udp-src> <udp-dst> <ip-src>"
> + " <ip-dst> <eth-src> <eth-dst>",
> + .tokens = {
> + (void *)&cmd_set_vxlan_set,
> + (void *)&cmd_set_vxlan_vxlan,
> + (void *)&cmd_set_vxlan_ip_version,
> + (void *)&cmd_set_vxlan_vni,
> + (void *)&cmd_set_vxlan_udp_src,
> + (void *)&cmd_set_vxlan_udp_dst,
> + (void *)&cmd_set_vxlan_ip_src,
> + (void *)&cmd_set_vxlan_ip_dst,
> + (void *)&cmd_set_vxlan_eth_src,
> + (void *)&cmd_set_vxlan_eth_dst,
> + NULL,
> + },
> +};
> +
> /* Strict link priority scheduling mode setting */
> static void
> cmd_strict_link_prio_parsed(
> @@ -17462,6 +17551,7 @@ cmdline_parse_ctx_t main_ctx[] = {
> #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
> (cmdline_parse_inst_t *)&cmd_set_port_tm_hierarchy_default,
> #endif
> + (cmdline_parse_inst_t *)&cmd_set_vxlan,
> (cmdline_parse_inst_t *)&cmd_ddp_add,
> (cmdline_parse_inst_t *)&cmd_ddp_del,
> (cmdline_parse_inst_t *)&cmd_ddp_get_list,
> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> index 934cf7e90..4f4aba407 100644
> --- a/app/test-pmd/cmdline_flow.c
> +++ b/app/test-pmd/cmdline_flow.c
> @@ -239,6 +239,8 @@ enum index {
> ACTION_OF_POP_MPLS_ETHERTYPE,
> ACTION_OF_PUSH_MPLS,
> ACTION_OF_PUSH_MPLS_ETHERTYPE,
> + ACTION_VXLAN_ENCAP,
> + ACTION_VXLAN_DECAP,
> };
>
> /** Maximum size for pattern in struct rte_flow_item_raw. */
> @@ -258,6 +260,22 @@ struct action_rss_data {
> uint16_t queue[ACTION_RSS_QUEUE_NUM];
> };
>
> +/** Maximum number of items in struct rte_flow_action_vxlan_encap. */
> +#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
> +
> +/** Storage for struct rte_flow_action_vxlan_encap including external data. */
> +struct action_vxlan_encap_data {
> + struct rte_flow_action_vxlan_encap conf;
> + struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
> + struct rte_flow_item_eth item_eth;
> + union {
> + struct rte_flow_item_ipv4 item_ipv4;
> + struct rte_flow_item_ipv6 item_ipv6;
> + };
> + struct rte_flow_item_udp item_udp;
> + struct rte_flow_item_vxlan item_vxlan;
> +};
> +
> /** Maximum number of subsequent tokens and arguments on the stack. */
> #define CTX_STACK_SIZE 16
>
> @@ -775,6 +793,8 @@ static const enum index next_action[] = {
> ACTION_OF_SET_VLAN_PCP,
> ACTION_OF_POP_MPLS,
> ACTION_OF_PUSH_MPLS,
> + ACTION_VXLAN_ENCAP,
> + ACTION_VXLAN_DECAP,
> ZERO,
> };
>
> @@ -905,6 +925,9 @@ static int parse_vc_action_rss_type(struct context *, const struct token *,
> static int parse_vc_action_rss_queue(struct context *, const struct token *,
> const char *, unsigned int, void *,
> unsigned int);
> +static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
> + const char *, unsigned int, void *,
> + unsigned int);
> static int parse_destroy(struct context *, const struct token *,
> const char *, unsigned int,
> void *, unsigned int);
> @@ -2387,6 +2410,24 @@ static const struct token token_list[] = {
> ethertype)),
> .call = parse_vc_conf,
> },
> + [ACTION_VXLAN_ENCAP] = {
> + .name = "vxlan_encap",
> + .help = "VXLAN encapsulation, uses configuration set by \"set"
> + " vxlan\"",
> + .priv = PRIV_ACTION(VXLAN_ENCAP,
> + sizeof(struct action_vxlan_encap_data)),
> + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
> + .call = parse_vc_action_vxlan_encap,
> + },
> + [ACTION_VXLAN_DECAP] = {
> + .name = "vxlan_decap",
> + .help = "Performs a decapsulation action by stripping all"
> + " headers of the VXLAN tunnel network overlay from the"
> + " matched flow.",
> + .priv = PRIV_ACTION(VXLAN_DECAP, 0),
> + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
> + .call = parse_vc,
> + },
> };
>
> /** Remove and return last entry from argument stack. */
> @@ -2951,6 +2992,94 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
> return len;
> }
>
> +/** Parse VXLAN encap action. */
> +static int
> +parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
> + const char *str, unsigned int len,
> + void *buf, unsigned int size)
> +{
> + struct buffer *out = buf;
> + struct rte_flow_action *action;
> + struct action_vxlan_encap_data *action_vxlan_encap_data;
> + int ret;
> +
> + ret = parse_vc(ctx, token, str, len, buf, size);
> + if (ret < 0)
> + return ret;
> + /* Nothing else to do if there is no buffer. */
> + if (!out)
> + return ret;
> + if (!out->args.vc.actions_n)
> + return -1;
> + action = &out->args.vc.actions[out->args.vc.actions_n - 1];
> + /* Point to selected object. */
> + ctx->object = out->args.vc.data;
> + ctx->objmask = NULL;
> + /* Set up default configuration. */
> + action_vxlan_encap_data = ctx->object;
> + *action_vxlan_encap_data = (struct action_vxlan_encap_data){
> + .conf = (struct rte_flow_action_vxlan_encap){
> + .definition = action_vxlan_encap_data->items,
> + },
> + .items = {
> + {
> + .type = RTE_FLOW_ITEM_TYPE_ETH,
> + .spec = &action_vxlan_encap_data->item_eth,
> + .mask = &action_vxlan_encap_data->item_eth,
> + },
> + {
> + .type = RTE_FLOW_ITEM_TYPE_IPV4,
> + .spec = &action_vxlan_encap_data->item_ipv4,
> + .mask = &action_vxlan_encap_data->item_ipv4,
> + },
> + {
> + .type = RTE_FLOW_ITEM_TYPE_UDP,
> + .spec = &action_vxlan_encap_data->item_udp,
> + .mask = &action_vxlan_encap_data->item_udp,
> + },
> + {
> + .type = RTE_FLOW_ITEM_TYPE_VXLAN,
> + .spec = &action_vxlan_encap_data->item_vxlan,
> + .mask = &action_vxlan_encap_data->item_vxlan,
> + },
> + {
> + .type = RTE_FLOW_ITEM_TYPE_END,
> + },
> + },
> + .item_eth = { .type = 0, },
> + .item_ipv4.hdr = {
> + .src_addr = vxlan_encap_conf.ipv4_src,
> + .dst_addr = vxlan_encap_conf.ipv4_dst,
> + },
> + .item_udp.hdr = {
> + .src_port = vxlan_encap_conf.udp_src,
> + .dst_port = vxlan_encap_conf.udp_dst,
> + },
> + .item_vxlan.flags = 0,
> + };
> + memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
> + vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
> + memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
> + vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
> + if (!vxlan_encap_conf.select_ipv4) {
> + memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
> + &vxlan_encap_conf.ipv6_src,
> + sizeof(vxlan_encap_conf.ipv6_src));
> + memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
> + &vxlan_encap_conf.ipv6_dst,
> + sizeof(vxlan_encap_conf.ipv6_dst));
> + action_vxlan_encap_data->items[1] = (struct rte_flow_item){
> + .type = RTE_FLOW_ITEM_TYPE_IPV6,
> + .spec = &action_vxlan_encap_data->item_ipv6,
> + .mask = &action_vxlan_encap_data->item_ipv6,
> + };
> + }
> + memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
> + RTE_DIM(vxlan_encap_conf.vni));
> + action->conf = &action_vxlan_encap_data->conf;
> + return ret;
> +}
> +
> /** Parse tokens for destroy command. */
> static int
> parse_destroy(struct context *ctx, const struct token *token,
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
> index 35cf26674..1c68c9d30 100644
> --- a/app/test-pmd/testpmd.c
> +++ b/app/test-pmd/testpmd.c
> @@ -393,6 +393,21 @@ uint8_t bitrate_enabled;
> struct gro_status gro_ports[RTE_MAX_ETHPORTS];
> uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
>
> +struct vxlan_encap_conf vxlan_encap_conf = {
> + .select_ipv4 = 1,
> + .vni = "\x00\x00\x00",
> + .udp_src = RTE_BE16(1),
> + .udp_dst = RTE_BE16(4789),
> + .ipv4_src = IPv4(127, 0, 0, 1),
> + .ipv4_dst = IPv4(255, 255, 255, 255),
> + .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
> + "\x00\x00\x00\x00\x00\x00\x00\x01",
> + .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
> + "\x00\x00\x00\x00\x00\x00\x11\x11",
> + .eth_src = "\x00\x00\x00\x00\x00\x00",
> + .eth_dst = "\xff\xff\xff\xff\xff\xff",
> +};
> +
> /* Forward function declarations */
> static void map_port_queue_stats_mapping_registers(portid_t pi,
> struct rte_port *port);
> diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
> index f51cd9dd9..72c4e8d54 100644
> --- a/app/test-pmd/testpmd.h
> +++ b/app/test-pmd/testpmd.h
> @@ -479,6 +479,21 @@ struct gso_status {
> extern struct gso_status gso_ports[RTE_MAX_ETHPORTS];
> extern uint16_t gso_max_segment_size;
>
> +/* VXLAN encap/decap parameters. */
> +struct vxlan_encap_conf {
> + uint32_t select_ipv4:1;
> + uint8_t vni[3];
> + rte_be16_t udp_src;
> + rte_be16_t udp_dst;
> + rte_be32_t ipv4_src;
> + rte_be32_t ipv4_dst;
> + uint8_t ipv6_src[16];
> + uint8_t ipv6_dst[16];
> + uint8_t eth_src[ETHER_ADDR_LEN];
> + uint8_t eth_dst[ETHER_ADDR_LEN];
> +};
> +struct vxlan_encap_conf vxlan_encap_conf;
> +
> static inline unsigned int
> lcore_num(void)
> {
> diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> index 0d6fd50ca..162d1c535 100644
> --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
> @@ -1534,6 +1534,12 @@ Enable or disable a per queue Tx offloading only on a specific Tx queue::
>
> This command should be run when the port is stopped, or else it will fail.
>
> +Config VXLAN Encap outer layers
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +Configure the outer layer to encapsulate a packet inside a VXLAN tunnel::
> +
> + testpmd> set vxlan ipv4|ipv6 (udp-src) (udp-dst) (ip-src) (ip-dst) (mac-src) (mac-dst)
                            ^^^^^^
Isn't the VXLAN vni parameter missing from this command syntax? And what about the VLAN tag id?
>
> Port Functions
> --------------
> @@ -3650,6 +3656,12 @@ This section lists supported actions and their attributes, if any.
>
> - ``ethertype``: Ethertype.
>
> +- ``vxlan_encap``: Performs a VXLAN encapsulation, outer layer configuration
> + is done through `Config VXLAN Encap outer layers`_.
> +
> +- ``vxlan_decap``: Performs a decapsulation action by stripping all headers of
> + the VXLAN tunnel network overlay from the matched flow.
> +
> Destroying flow rules
> ~~~~~~~~~~~~~~~~~~~~~
>
next prev parent reply other threads:[~2018-06-18 12:47 UTC|newest]
Thread overview: 63+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-06-14 15:08 [dpdk-dev] [PATCH 0/2] implement VXLAN/NVGRE Encap/Decap in testpmd Nelio Laranjeiro
2018-06-14 15:08 ` [dpdk-dev] [PATCH 1/2] app/testpmd: add VXLAN encap/decap support Nelio Laranjeiro
2018-06-14 15:09 ` [dpdk-dev] [PATCH 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-06-15 9:32 ` Iremonger, Bernard
2018-06-15 11:25 ` Nélio Laranjeiro
2018-06-18 8:52 ` [dpdk-dev] [PATCH v2 0/2] app/testpmd implement VXLAN/NVGRE Encap/Decap Nelio Laranjeiro
2018-06-18 9:05 ` Ferruh Yigit
2018-06-18 9:38 ` Nélio Laranjeiro
2018-06-18 14:40 ` Ferruh Yigit
2018-06-19 7:32 ` Nélio Laranjeiro
2018-06-18 14:36 ` [dpdk-dev] [PATCH v3 " Nelio Laranjeiro
2018-06-18 16:28 ` Iremonger, Bernard
2018-06-19 9:41 ` Nélio Laranjeiro
2018-06-21 7:13 ` [dpdk-dev] [PATCH v4 " Nelio Laranjeiro
2018-06-21 7:13 ` [dpdk-dev] [PATCH v4 1/2] app/testpmd: add VXLAN encap/decap support Nelio Laranjeiro
2018-06-26 10:51 ` Ori Kam
2018-06-26 12:43 ` Iremonger, Bernard
2018-06-21 7:13 ` [dpdk-dev] [PATCH v4 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-06-26 10:48 ` Ori Kam
2018-06-26 12:48 ` Iremonger, Bernard
2018-06-26 15:15 ` Nélio Laranjeiro
2018-06-22 7:42 ` [dpdk-dev] [PATCH v4 0/2] app/testpmd implement VXLAN/NVGRE Encap/Decap Mohammad Abdul Awal
2018-06-22 8:31 ` Nélio Laranjeiro
2018-06-22 8:51 ` Mohammad Abdul Awal
2018-06-22 9:08 ` Nélio Laranjeiro
2018-06-22 10:19 ` Mohammad Abdul Awal
2018-06-26 15:15 ` Nélio Laranjeiro
2018-06-27 8:53 ` [dpdk-dev] [PATCH v5 " Nelio Laranjeiro
2018-06-27 8:53 ` [dpdk-dev] [PATCH v5 1/2] app/testpmd: add VXLAN encap/decap support Nelio Laranjeiro
2018-06-27 8:53 ` [dpdk-dev] [PATCH v5 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-06-27 9:53 ` [dpdk-dev] [PATCH v6 0/2] app/testpmd implement VXLAN/NVGRE Encap/Decap Nelio Laranjeiro
2018-06-27 9:53 ` [dpdk-dev] [PATCH v6 1/2] app/testpmd: add VXLAN encap/decap support Nelio Laranjeiro
2018-06-27 9:53 ` [dpdk-dev] [PATCH v6 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-06-27 10:00 ` [dpdk-dev] [PATCH v6 0/2] app/testpmd implement VXLAN/NVGRE Encap/Decap Nélio Laranjeiro
2018-06-27 11:45 ` [dpdk-dev] [PATCH v7 " Nelio Laranjeiro
2018-06-27 11:45 ` [dpdk-dev] [PATCH v7 1/2] app/testpmd: add VXLAN encap/decap support Nelio Laranjeiro
2018-06-27 11:45 ` [dpdk-dev] [PATCH v7 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-07-02 10:40 ` [dpdk-dev] [PATCH v7 0/2] app/testpmd implement VXLAN/NVGRE Encap/Decap Mohammad Abdul Awal
2018-07-04 14:54 ` Ferruh Yigit
2018-07-05 9:37 ` Nélio Laranjeiro
2018-07-05 14:33 ` [dpdk-dev] [PATCH v8 " Nelio Laranjeiro
2018-07-05 14:33 ` [dpdk-dev] [PATCH v8 1/2] app/testpmd: add VXLAN encap/decap support Nelio Laranjeiro
2018-07-05 15:03 ` Mohammad Abdul Awal
2018-07-05 14:33 ` [dpdk-dev] [PATCH v8 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-07-05 15:07 ` Mohammad Abdul Awal
2018-07-05 15:17 ` Nélio Laranjeiro
2018-07-05 14:48 ` [dpdk-dev] [PATCH v8 0/2] app/testpmd implement VXLAN/NVGRE Encap/Decap Adrien Mazarguil
2018-07-05 14:57 ` Mohammad Abdul Awal
2018-07-06 6:43 ` [dpdk-dev] [PATCH v9 " Nelio Laranjeiro
2018-07-06 6:43 ` [dpdk-dev] [PATCH v9 1/2] app/testpmd: add VXLAN encap/decap support Nelio Laranjeiro
2018-07-06 6:43 ` [dpdk-dev] [PATCH v9 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-07-18 8:31 ` [dpdk-dev] [PATCH v9 0/2] app/testpmd implement VXLAN/NVGRE Encap/Decap Ferruh Yigit
2018-06-18 14:36 ` [dpdk-dev] [PATCH v3 1/2] app/testpmd: add VXLAN encap/decap support Nelio Laranjeiro
2018-06-19 7:09 ` Ori Kam
2018-06-19 9:40 ` Nélio Laranjeiro
2018-06-18 14:36 ` [dpdk-dev] [PATCH v3 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-06-19 7:08 ` Ori Kam
2018-06-18 8:52 ` [dpdk-dev] [PATCH v2 1/2] app/testpmd: add VXLAN " Nelio Laranjeiro
2018-06-18 12:47 ` Mohammad Abdul Awal [this message]
2018-06-18 21:02 ` Stephen Hemminger
2018-06-19 9:44 ` Nélio Laranjeiro
2018-06-18 8:52 ` [dpdk-dev] [PATCH v2 2/2] app/testpmd: add NVGRE " Nelio Laranjeiro
2018-06-18 12:48 ` Mohammad Abdul Awal
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=43504c63-4fd8-c8fc-55fc-2f81549600b7@intel.com \
--to=mohammad.abdul.awal@intel.com \
--cc=adrien.mazarguil@6wind.com \
--cc=bernard.iremonger@intel.com \
--cc=dev@dpdk.org \
--cc=jingjing.wu@intel.com \
--cc=nelio.laranjeiro@6wind.com \
--cc=wenzhuo.lu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).