From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, niklas.soderlund@corigine.com,
Chaoyong He <chaoyong.he@corigine.com>
Subject: [PATCH v3 02/26] net/nfp: support IPv4 VXLAN flow item
Date: Tue, 25 Oct 2022 15:58:54 +0800
Message-ID: <20221025075918.7778-3-chaoyong.he@corigine.com>
In-Reply-To: <20221025075918.7778-1-chaoyong.he@corigine.com>

Add the corresponding data structures and logic to support
offloading of the IPv4 VXLAN flow item.
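
For example, after this change a rule of the following shape can be
offloaded (testpmd flow syntax; the port number, VNI value and the
count action are only illustrative, not mandated by this patch):

    flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 100 / end actions count / end
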
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
doc/guides/nics/features/nfp.ini | 1 +
drivers/net/nfp/flower/nfp_flower_cmsg.h | 35 ++++
drivers/net/nfp/nfp_flow.c | 243 ++++++++++++++++++++---
3 files changed, 246 insertions(+), 33 deletions(-)
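
Note: for tunnel flows the outer L3 match data is carried in the new
struct nfp_flower_ipv4_udp_tun rather than in struct nfp_flower_ipv4,
which is why the key size calculation below adds the former and
subtracts the latter. The VNI occupies the upper 24 bits of the tun_id
word, matching the on-wire layout of the VXLAN vx_vni field (hence the
RTE_BE32(0xffffff00) mask). As a sketch, assuming a populated
struct nfp_flower_ipv4_udp_tun *tun4, the host-order VNI can be
recovered as:

    uint32_t vni = rte_be_to_cpu_32(tun4->tun_id) >> 8;
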
diff --git a/doc/guides/nics/features/nfp.ini b/doc/guides/nics/features/nfp.ini
index 0184980e88..faaa7da83c 100644
--- a/doc/guides/nics/features/nfp.ini
+++ b/doc/guides/nics/features/nfp.ini
@@ -35,6 +35,7 @@ sctp = Y
tcp = Y
udp = Y
vlan = Y
+vxlan = Y
[rte_flow actions]
count = Y
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h
index 6bf8ff7d56..08e2873808 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h
@@ -324,6 +324,41 @@ struct nfp_flower_ipv6 {
uint8_t ipv6_dst[16];
};
+struct nfp_flower_tun_ipv4 {
+ rte_be32_t src;
+ rte_be32_t dst;
+};
+
+struct nfp_flower_tun_ip_ext {
+ uint8_t tos;
+ uint8_t ttl;
+};
+
+/*
+ * Flow Frame IPv4 UDP TUNNEL --> Tunnel details (5W/20B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ipv4_udp_tun {
+ struct nfp_flower_tun_ipv4 ipv4;
+ rte_be16_t reserved1;
+ struct nfp_flower_tun_ip_ext ip_ext;
+ rte_be32_t reserved2;
+ rte_be32_t tun_id;
+};
+
struct nfp_fl_act_head {
uint8_t jump_id;
uint8_t len_lw;
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 69fc8be7ed..0e1e5ea6b2 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -38,7 +38,8 @@ struct nfp_flow_item_proc {
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
- bool is_mask);
+ bool is_mask,
+ bool is_outer_layer);
/* List of possible subsequent items. */
const enum rte_flow_item_type *const next_item;
};
@@ -491,6 +492,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
struct nfp_fl_key_ls *key_ls)
{
struct rte_eth_dev *ethdev;
+ bool outer_ip4_flag = false;
const struct rte_flow_item *item;
struct nfp_flower_representor *representor;
const struct rte_flow_item_port_id *port_id;
@@ -526,6 +528,8 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV4 detected");
key_ls->key_layer |= NFP_FLOWER_LAYER_IPV4;
key_ls->key_size += sizeof(struct nfp_flower_ipv4);
+ if (!outer_ip4_flag)
+ outer_ip4_flag = true;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_IPV6 detected");
@@ -547,6 +551,21 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],
key_ls->key_layer |= NFP_FLOWER_LAYER_TP;
key_ls->key_size += sizeof(struct nfp_flower_tp_ports);
break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ PMD_DRV_LOG(DEBUG, "RTE_FLOW_ITEM_TYPE_VXLAN detected");
+ /* Clear the IPv4 layer bit */
+ key_ls->key_layer &= ~NFP_FLOWER_LAYER_IPV4;
+ key_ls->tun_type = NFP_FL_TUN_VXLAN;
+ key_ls->key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ if (outer_ip4_flag) {
+ key_ls->key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+ /*
+ * The outer L3 layer information is
+ * in `struct nfp_flower_ipv4_udp_tun`
+ */
+ key_ls->key_size -= sizeof(struct nfp_flower_ipv4);
+ }
+ break;
default:
PMD_DRV_LOG(ERR, "Item type %d not supported.", item->type);
return -ENOTSUP;
@@ -719,12 +738,25 @@ nfp_flow_key_layers_calculate(const struct rte_flow_item items[],
return ret;
}
+static bool
+nfp_flow_is_tunnel(struct rte_flow *nfp_flow)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+
+ meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+ if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_VXLAN)
+ return true;
+
+ return false;
+}
+
static int
nfp_flow_merge_eth(__rte_unused struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
- bool is_mask)
+ bool is_mask,
+ __rte_unused bool is_outer_layer)
{
struct nfp_flower_mac_mpls *eth;
const struct rte_flow_item_eth *spec;
@@ -760,7 +792,8 @@ nfp_flow_merge_vlan(struct rte_flow *nfp_flow,
__rte_unused char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
- bool is_mask)
+ bool is_mask,
+ __rte_unused bool is_outer_layer)
{
struct nfp_flower_meta_tci *meta_tci;
const struct rte_flow_item_vlan *spec;
@@ -789,41 +822,58 @@ nfp_flow_merge_ipv4(struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
- bool is_mask)
+ bool is_mask,
+ bool is_outer_layer)
{
struct nfp_flower_ipv4 *ipv4;
const struct rte_ipv4_hdr *hdr;
struct nfp_flower_meta_tci *meta_tci;
const struct rte_flow_item_ipv4 *spec;
const struct rte_flow_item_ipv4 *mask;
+ struct nfp_flower_ipv4_udp_tun *ipv4_udp_tun;
spec = item->spec;
mask = item->mask ? item->mask : proc->mask_default;
meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
- if (spec == NULL) {
- PMD_DRV_LOG(DEBUG, "nfp flow merge ipv4: no item->spec!");
- goto ipv4_end;
- }
+ if (is_outer_layer && nfp_flow_is_tunnel(nfp_flow)) {
+ if (spec == NULL) {
+ PMD_DRV_LOG(DEBUG, "nfp flow merge ipv4: no item->spec!");
+ return 0;
+ }
- /*
- * reserve space for L4 info.
- * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4
- */
- if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
- *mbuf_off += sizeof(struct nfp_flower_tp_ports);
+ hdr = is_mask ? &mask->hdr : &spec->hdr;
+ ipv4_udp_tun = (struct nfp_flower_ipv4_udp_tun *)*mbuf_off;
- hdr = is_mask ? &mask->hdr : &spec->hdr;
- ipv4 = (struct nfp_flower_ipv4 *)*mbuf_off;
+ ipv4_udp_tun->ip_ext.tos = hdr->type_of_service;
+ ipv4_udp_tun->ip_ext.ttl = hdr->time_to_live;
+ ipv4_udp_tun->ipv4.src = hdr->src_addr;
+ ipv4_udp_tun->ipv4.dst = hdr->dst_addr;
+ } else {
+ if (spec == NULL) {
+ PMD_DRV_LOG(DEBUG, "nfp flow merge ipv4: no item->spec!");
+ goto ipv4_end;
+ }
+
+ /*
+ * reserve space for L4 info.
+ * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4
+ */
+ if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+ *mbuf_off += sizeof(struct nfp_flower_tp_ports);
+
+ hdr = is_mask ? &mask->hdr : &spec->hdr;
+ ipv4 = (struct nfp_flower_ipv4 *)*mbuf_off;
- ipv4->ip_ext.tos = hdr->type_of_service;
- ipv4->ip_ext.proto = hdr->next_proto_id;
- ipv4->ip_ext.ttl = hdr->time_to_live;
- ipv4->ipv4_src = hdr->src_addr;
- ipv4->ipv4_dst = hdr->dst_addr;
+ ipv4->ip_ext.tos = hdr->type_of_service;
+ ipv4->ip_ext.proto = hdr->next_proto_id;
+ ipv4->ip_ext.ttl = hdr->time_to_live;
+ ipv4->ipv4_src = hdr->src_addr;
+ ipv4->ipv4_dst = hdr->dst_addr;
ipv4_end:
- *mbuf_off += sizeof(struct nfp_flower_ipv4);
+ *mbuf_off += sizeof(struct nfp_flower_ipv4);
+ }
return 0;
}
@@ -833,7 +883,8 @@ nfp_flow_merge_ipv6(struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
- bool is_mask)
+ bool is_mask,
+ __rte_unused bool is_outer_layer)
{
struct nfp_flower_ipv6 *ipv6;
const struct rte_ipv6_hdr *hdr;
@@ -878,7 +929,8 @@ nfp_flow_merge_tcp(struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
- bool is_mask)
+ bool is_mask,
+ __rte_unused bool is_outer_layer)
{
uint8_t tcp_flags;
struct nfp_flower_tp_ports *ports;
@@ -950,7 +1002,8 @@ nfp_flow_merge_udp(struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
- bool is_mask)
+ bool is_mask,
+ bool is_outer_layer)
{
char *ports_off;
struct nfp_flower_tp_ports *ports;
@@ -964,6 +1017,12 @@ nfp_flow_merge_udp(struct rte_flow *nfp_flow,
return 0;
}
+ /* Don't add L4 info if working on an inner layer pattern */
+ if (!is_outer_layer) {
+ PMD_DRV_LOG(INFO, "Detected inner layer UDP, skipping.");
+ return 0;
+ }
+
meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) {
ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv4) -
@@ -991,7 +1050,8 @@ nfp_flow_merge_sctp(struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
- bool is_mask)
+ bool is_mask,
+ __rte_unused bool is_outer_layer)
{
char *ports_off;
struct nfp_flower_tp_ports *ports;
@@ -1027,10 +1087,42 @@ nfp_flow_merge_sctp(struct rte_flow *nfp_flow,
return 0;
}
+static int
+nfp_flow_merge_vxlan(__rte_unused struct rte_flow *nfp_flow,
+ char **mbuf_off,
+ const struct rte_flow_item *item,
+ const struct nfp_flow_item_proc *proc,
+ bool is_mask,
+ __rte_unused bool is_outer_layer)
+{
+ const struct rte_vxlan_hdr *hdr;
+ struct nfp_flower_ipv4_udp_tun *tun4;
+ const struct rte_flow_item_vxlan *spec;
+ const struct rte_flow_item_vxlan *mask;
+
+ spec = item->spec;
+ if (spec == NULL) {
+ PMD_DRV_LOG(DEBUG, "nfp flow merge vxlan: no item->spec!");
+ goto vxlan_end;
+ }
+
+ mask = item->mask ? item->mask : proc->mask_default;
+ hdr = is_mask ? &mask->hdr : &spec->hdr;
+
+ tun4 = (struct nfp_flower_ipv4_udp_tun *)*mbuf_off;
+ tun4->tun_id = hdr->vx_vni;
+
+vxlan_end:
+ *mbuf_off += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+ return 0;
+}
+
/* Graph of supported items and associated process function */
static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
[RTE_FLOW_ITEM_TYPE_END] = {
- .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4),
},
[RTE_FLOW_ITEM_TYPE_ETH] = {
.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
@@ -1113,6 +1205,7 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
.merge = nfp_flow_merge_tcp,
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VXLAN),
.mask_support = &(const struct rte_flow_item_udp){
.hdr = {
.src_port = RTE_BE16(0xffff),
@@ -1134,6 +1227,17 @@ static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
.mask_sz = sizeof(struct rte_flow_item_sctp),
.merge = nfp_flow_merge_sctp,
},
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
+ .mask_support = &(const struct rte_flow_item_vxlan){
+ .hdr = {
+ .vx_vni = RTE_BE32(0xffffff00),
+ },
+ },
+ .mask_default = &rte_flow_item_vxlan_mask,
+ .mask_sz = sizeof(struct rte_flow_item_vxlan),
+ .merge = nfp_flow_merge_vxlan,
+ },
};
static int
@@ -1187,21 +1291,53 @@ nfp_flow_item_check(const struct rte_flow_item *item,
return ret;
}
+static bool
+nfp_flow_is_tun_item(const struct rte_flow_item *item)
+{
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+ return true;
+
+ return false;
+}
+
+static bool
+nfp_flow_inner_item_get(const struct rte_flow_item items[],
+ const struct rte_flow_item **inner_item)
+{
+ const struct rte_flow_item *item;
+
+ *inner_item = items;
+
+ for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
+ if (nfp_flow_is_tun_item(item)) {
+ *inner_item = ++item;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int
nfp_flow_compile_item_proc(const struct rte_flow_item items[],
struct rte_flow *nfp_flow,
char **mbuf_off_exact,
- char **mbuf_off_mask)
+ char **mbuf_off_mask,
+ bool is_outer_layer)
{
int i;
int ret = 0;
+ bool continue_flag = true;
const struct rte_flow_item *item;
const struct nfp_flow_item_proc *proc_list;
proc_list = nfp_flow_item_proc_list;
- for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
+ for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END && continue_flag; ++item) {
const struct nfp_flow_item_proc *proc = NULL;
+ if (nfp_flow_is_tun_item(item))
+ continue_flag = false;
+
for (i = 0; proc_list->next_item && proc_list->next_item[i]; ++i) {
if (proc_list->next_item[i] == item->type) {
proc = &nfp_flow_item_proc_list[item->type];
@@ -1230,14 +1366,14 @@ nfp_flow_compile_item_proc(const struct rte_flow_item items[],
}
ret = proc->merge(nfp_flow, mbuf_off_exact, item,
- proc, false);
+ proc, false, is_outer_layer);
if (ret != 0) {
PMD_DRV_LOG(ERR, "nfp flow item %d exact merge failed", item->type);
break;
}
ret = proc->merge(nfp_flow, mbuf_off_mask, item,
- proc, true);
+ proc, true, is_outer_layer);
if (ret != 0) {
PMD_DRV_LOG(ERR, "nfp flow item %d mask merge failed", item->type);
break;
@@ -1257,6 +1393,9 @@ nfp_flow_compile_items(__rte_unused struct nfp_flower_representor *representor,
int ret;
char *mbuf_off_mask;
char *mbuf_off_exact;
+ bool is_tun_flow = false;
+ bool is_outer_layer = true;
+ const struct rte_flow_item *loop_item;
mbuf_off_exact = nfp_flow->payload.unmasked_data +
sizeof(struct nfp_flower_meta_tci) +
@@ -1265,14 +1404,29 @@ nfp_flow_compile_items(__rte_unused struct nfp_flower_representor *representor,
sizeof(struct nfp_flower_meta_tci) +
sizeof(struct nfp_flower_in_port);
+ /* Check if this is a tunnel flow and get the inner item */
+ is_tun_flow = nfp_flow_inner_item_get(items, &loop_item);
+ if (is_tun_flow)
+ is_outer_layer = false;
+
/* Go over items */
- ret = nfp_flow_compile_item_proc(items, nfp_flow,
- &mbuf_off_exact, &mbuf_off_mask);
+ ret = nfp_flow_compile_item_proc(loop_item, nfp_flow,
+ &mbuf_off_exact, &mbuf_off_mask, is_outer_layer);
if (ret != 0) {
PMD_DRV_LOG(ERR, "nfp flow item compile failed.");
return -EINVAL;
}
+ /* Go over the outer items of the tunnel flow */
+ if (is_tun_flow) {
+ ret = nfp_flow_compile_item_proc(items, nfp_flow,
+ &mbuf_off_exact, &mbuf_off_mask, true);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "nfp flow outer item compile failed.");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -2119,12 +2273,35 @@ nfp_flow_query(struct rte_eth_dev *dev,
return 0;
}
+static int
+nfp_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_tunnel *tunnel,
+ __rte_unused struct rte_flow_item **pmd_items,
+ uint32_t *num_of_items,
+ __rte_unused struct rte_flow_error *err)
+{
+ *num_of_items = 0;
+
+ return 0;
+}
+
+static int
+nfp_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct rte_flow_item *pmd_items,
+ __rte_unused uint32_t num_of_items,
+ __rte_unused struct rte_flow_error *err)
+{
+ return 0;
+}
+
static const struct rte_flow_ops nfp_flow_ops = {
.validate = nfp_flow_validate,
.create = nfp_flow_create,
.destroy = nfp_flow_destroy,
.flush = nfp_flow_flush,
.query = nfp_flow_query,
+ .tunnel_match = nfp_flow_tunnel_match,
+ .tunnel_item_release = nfp_flow_tunnel_item_release,
};
int
--
2.29.3