DPDK patches and discussions
From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, niklas.soderlund@corigine.com,
	Chaoyong He <chaoyong.he@corigine.com>
Subject: [PATCH v3 09/26] net/nfp: prepare for IPv6 UDP tunnel decap flow action
Date: Tue, 25 Oct 2022 15:59:01 +0800
Message-ID: <20221025075918.7778-10-chaoyong.he@corigine.com>
In-Reply-To: <20221025075918.7778-1-chaoyong.he@corigine.com>

Add the related data structures and functions in preparation for
the decap action of the IPv6 UDP tunnel.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
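A layout note on the new control message (illustrative, not part of the
patch): the TUN_IPS_V6 payload is fixed size, a 32-bit big-endian count
followed by NFP_FL_IPV6_ADDRS_MAX (4) slots of 16 bytes each, so
msg->ipv6_addr[count * 16] points at the first byte of slot 'count'.
Assuming only the struct added below, a build-time check of that
arithmetic could look like:

/* Illustrative only: 4-byte count plus 4 x 16-byte IPv6 address slots. */
_Static_assert(sizeof(struct nfp_flower_cmsg_tun_ipv6_addr) ==
		sizeof(rte_be32_t) + NFP_FL_IPV6_ADDRS_MAX * 16,
		"TUN_IPS_V6 payload must be 68 bytes");
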
 drivers/net/nfp/flower/nfp_flower_cmsg.c |  42 +++++++
 drivers/net/nfp/flower/nfp_flower_cmsg.h |  24 ++++
 drivers/net/nfp/nfp_flow.c               | 145 ++++++++++++++++++++++-
 drivers/net/nfp/nfp_flow.h               |   9 ++
 4 files changed, 217 insertions(+), 3 deletions(-)
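
The two helpers added here, nfp_tun_add_ipv6_off() and
nfp_flower_add_tun_neigh_v6_decap(), are still marked __rte_unused; the
IPv6 UDP tunnel decap action later in this series is expected to call
them. A minimal sketch of that call pattern, written only against the
signatures in this patch (the example function and its ipv6_dst
parameter are placeholders, not code from the series):

/* Illustrative only: expected use of the new helpers on flow create. */
static int
example_ipv6_udp_tun_decap(struct nfp_app_fw_flower *app_fw_flower,
		struct rte_flow *nfp_flow, uint8_t ipv6_dst[])
{
	int ret;

	/*
	 * Reference count the tunnel destination address; for a new
	 * address the helper pushes the whole list to the firmware
	 * via NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6.
	 */
	ret = nfp_tun_add_ipv6_off(app_fw_flower, ipv6_dst);
	if (ret != 0)
		return ret;

	/* Program the neighbour entry for the decap direction. */
	return nfp_flower_add_tun_neigh_v6_decap(app_fw_flower, nfp_flow);
}

/*
 * On flow destroy, nfp_tun_check_ip_off_del() (updated by this patch)
 * drops the reference again through nfp_tun_del_ipv6_off().
 */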

diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c
index f18f3de042..76815cfe14 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.c
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c
@@ -347,6 +347,48 @@ nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower)
 	return 0;
 }
 
+int
+nfp_flower_cmsg_tun_off_v6(struct nfp_app_fw_flower *app_fw_flower)
+{
+	uint16_t cnt;
+	uint32_t count = 0;
+	struct rte_mbuf *mbuf;
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv6_addr_entry *entry;
+	struct nfp_flower_cmsg_tun_ipv6_addr *msg;
+
+	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+	if (mbuf == NULL) {
+		PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v6 tun addr");
+		return -ENOMEM;
+	}
+
+	msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6, sizeof(*msg));
+
+	priv = app_fw_flower->flow_priv;
+	rte_spinlock_lock(&priv->ipv6_off_lock);
+	LIST_FOREACH(entry, &priv->ipv6_off_list, next) {
+		if (count >= NFP_FL_IPV6_ADDRS_MAX) {
+			rte_spinlock_unlock(&priv->ipv6_off_lock);
+			PMD_DRV_LOG(ERR, "IPv6 offload exceeds limit.");
+			return -ERANGE;
+		}
+		memcpy(&msg->ipv6_addr[count * 16], entry->ipv6_addr, 16UL);
+		count++;
+	}
+	msg->count = rte_cpu_to_be_32(count);
+	rte_spinlock_unlock(&priv->ipv6_off_lock);
+
+	cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+	if (cnt == 0) {
+		PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+		rte_pktmbuf_free(mbuf);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 int
 nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_fl_rule_metadata *nfp_flow_meta,
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h
index 0933dacfb1..61f2f83fc9 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h
@@ -280,6 +280,29 @@ struct nfp_flower_cmsg_tun_ipv4_addr {
 	rte_be32_t ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
 };
 
+#define NFP_FL_IPV6_ADDRS_MAX        4
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_TUN_IPS_V6
+ *    Bit    3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ *    -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *          +---------------------------------------------------------------+
+ *        0 |                    Number of IP Addresses                     |
+ *          +---------------------------------------------------------------+
+ *        1 |                        IP Address1 #1                         |
+ *          +---------------------------------------------------------------+
+ *        2 |                        IP Address1 #2                         |
+ *          +---------------------------------------------------------------+
+ *          |                             ...                               |
+ *          +---------------------------------------------------------------+
+ *       16 |                        IP Address4 #4                         |
+ *          +---------------------------------------------------------------+
+ */
+struct nfp_flower_cmsg_tun_ipv6_addr {
+	rte_be32_t count;
+	uint8_t ipv6_addr[NFP_FL_IPV6_ADDRS_MAX * 16];
+};
+
 /*
  * NFP_FLOWER_CMSG_TYPE_FLOW_STATS
  *    Bit    3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
@@ -802,6 +825,7 @@ int nfp_flower_cmsg_tun_neigh_v4_rule(struct nfp_app_fw_flower *app_fw_flower,
 int nfp_flower_cmsg_tun_neigh_v6_rule(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_flower_cmsg_tun_neigh_v6 *payload);
 int nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower);
+int nfp_flower_cmsg_tun_off_v6(struct nfp_app_fw_flower *app_fw_flower);
 int nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_fl_rule_metadata *nfp_flow_meta,
 		uint16_t mac_idx,
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index c088d24413..ad484b95b7 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -476,16 +476,95 @@ nfp_tun_del_ipv4_off(struct nfp_app_fw_flower *app_fw_flower,
 	return 0;
 }
 
+__rte_unused static int
+nfp_tun_add_ipv6_off(struct nfp_app_fw_flower *app_fw_flower,
+		uint8_t ipv6[])
+{
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv6_addr_entry *entry;
+	struct nfp_ipv6_addr_entry *tmp_entry;
+
+	priv = app_fw_flower->flow_priv;
+
+	rte_spinlock_lock(&priv->ipv6_off_lock);
+	LIST_FOREACH(entry, &priv->ipv6_off_list, next) {
+		if (!memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr))) {
+			entry->ref_count++;
+			rte_spinlock_unlock(&priv->ipv6_off_lock);
+			return 0;
+		}
+	}
+	rte_spinlock_unlock(&priv->ipv6_off_lock);
+
+	tmp_entry = rte_zmalloc("nfp_ipv6_off", sizeof(struct nfp_ipv6_addr_entry), 0);
+	if (tmp_entry == NULL) {
+		PMD_DRV_LOG(ERR, "Mem error when offloading IP6 address.");
+		return -ENOMEM;
+	}
+	memcpy(tmp_entry->ipv6_addr, ipv6, sizeof(tmp_entry->ipv6_addr));
+	tmp_entry->ref_count = 1;
+
+	rte_spinlock_lock(&priv->ipv6_off_lock);
+	LIST_INSERT_HEAD(&priv->ipv6_off_list, tmp_entry, next);
+	rte_spinlock_unlock(&priv->ipv6_off_lock);
+
+	return nfp_flower_cmsg_tun_off_v6(app_fw_flower);
+}
+
+static int
+nfp_tun_del_ipv6_off(struct nfp_app_fw_flower *app_fw_flower,
+		uint8_t ipv6[])
+{
+	struct nfp_flow_priv *priv;
+	struct nfp_ipv6_addr_entry *entry;
+
+	priv = app_fw_flower->flow_priv;
+
+	rte_spinlock_lock(&priv->ipv6_off_lock);
+	LIST_FOREACH(entry, &priv->ipv6_off_list, next) {
+		if (!memcmp(entry->ipv6_addr, ipv6, sizeof(entry->ipv6_addr))) {
+			entry->ref_count--;
+			if (entry->ref_count == 0) {
+				LIST_REMOVE(entry, next);
+				rte_free(entry);
+				rte_spinlock_unlock(&priv->ipv6_off_lock);
+				return nfp_flower_cmsg_tun_off_v6(app_fw_flower);
+			}
+			break;
+		}
+	}
+	rte_spinlock_unlock(&priv->ipv6_off_lock);
+
+	return 0;
+}
+
 static int
 nfp_tun_check_ip_off_del(struct nfp_flower_representor *repr,
 		struct rte_flow *nfp_flow)
 {
 	int ret;
+	uint32_t key_layer2 = 0;
 	struct nfp_flower_ipv4_udp_tun *udp4;
+	struct nfp_flower_ipv6_udp_tun *udp6;
+	struct nfp_flower_meta_tci *meta_tci;
+	struct nfp_flower_ext_meta *ext_meta = NULL;
 
-	udp4 = (struct nfp_flower_ipv4_udp_tun *)(nfp_flow->payload.mask_data -
-			sizeof(struct nfp_flower_ipv4_udp_tun));
-	ret = nfp_tun_del_ipv4_off(repr->app_fw_flower, udp4->ipv4.dst);
+	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+	if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_EXT_META)
+		ext_meta = (struct nfp_flower_ext_meta *)(meta_tci + 1);
+
+	if (ext_meta != NULL)
+		key_layer2 = rte_be_to_cpu_32(ext_meta->nfp_flow_key_layer2);
+
+	if (key_layer2 & NFP_FLOWER_LAYER2_TUN_IPV6) {
+		udp6 = (struct nfp_flower_ipv6_udp_tun *)(nfp_flow->payload.mask_data -
+				sizeof(struct nfp_flower_ipv6_udp_tun));
+		ret = nfp_tun_del_ipv6_off(repr->app_fw_flower, udp6->ipv6.ipv6_dst);
+	} else {
+		udp4 = (struct nfp_flower_ipv4_udp_tun *)(nfp_flow->payload.mask_data -
+				sizeof(struct nfp_flower_ipv4_udp_tun));
+		ret = nfp_tun_del_ipv4_off(repr->app_fw_flower, udp4->ipv4.dst);
+	}
 
 	return ret;
 }
@@ -2078,6 +2157,59 @@ nfp_flower_add_tun_neigh_v6_encap(struct nfp_app_fw_flower *app_fw_flower,
 	return nfp_flower_cmsg_tun_neigh_v6_rule(app_fw_flower, &payload);
 }
 
+__rte_unused static int
+nfp_flower_add_tun_neigh_v6_decap(struct nfp_app_fw_flower *app_fw_flower,
+		struct rte_flow *nfp_flow)
+{
+	struct nfp_fl_tun *tmp;
+	struct nfp_fl_tun *tun;
+	struct nfp_flow_priv *priv;
+	struct nfp_flower_ipv6 *ipv6;
+	struct nfp_flower_mac_mpls *eth;
+	struct nfp_flower_in_port *port;
+	struct nfp_flower_meta_tci *meta_tci;
+	struct nfp_flower_cmsg_tun_neigh_v6 payload;
+
+	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+	port = (struct nfp_flower_in_port *)(meta_tci + 1);
+	eth = (struct nfp_flower_mac_mpls *)(port + 1);
+
+	if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+		ipv6 = (struct nfp_flower_ipv6 *)((char *)eth +
+				sizeof(struct nfp_flower_mac_mpls) +
+				sizeof(struct nfp_flower_tp_ports));
+	else
+		ipv6 = (struct nfp_flower_ipv6 *)((char *)eth +
+				sizeof(struct nfp_flower_mac_mpls));
+
+	tun = &nfp_flow->tun;
+	tun->payload.v6_flag = 1;
+	memcpy(tun->payload.dst.dst_ipv6, ipv6->ipv6_src, sizeof(tun->payload.dst.dst_ipv6));
+	memcpy(tun->payload.src.src_ipv6, ipv6->ipv6_dst, sizeof(tun->payload.src.src_ipv6));
+	memcpy(tun->payload.dst_addr, eth->mac_src, RTE_ETHER_ADDR_LEN);
+	memcpy(tun->payload.src_addr, eth->mac_dst, RTE_ETHER_ADDR_LEN);
+
+	tun->ref_cnt = 1;
+	priv = app_fw_flower->flow_priv;
+	LIST_FOREACH(tmp, &priv->nn_list, next) {
+		if (memcmp(&tmp->payload, &tun->payload, sizeof(struct nfp_fl_tun_entry)) == 0) {
+			tmp->ref_cnt++;
+			return 0;
+		}
+	}
+
+	LIST_INSERT_HEAD(&priv->nn_list, tun, next);
+
+	memset(&payload, 0, sizeof(struct nfp_flower_cmsg_tun_neigh_v6));
+	memcpy(payload.dst_ipv6, ipv6->ipv6_src, sizeof(payload.dst_ipv6));
+	memcpy(payload.src_ipv6, ipv6->ipv6_dst, sizeof(payload.src_ipv6));
+	memcpy(payload.common.dst_mac, eth->mac_src, RTE_ETHER_ADDR_LEN);
+	memcpy(payload.common.src_mac, eth->mac_dst, RTE_ETHER_ADDR_LEN);
+	payload.common.port_id = port->in_port;
+
+	return nfp_flower_cmsg_tun_neigh_v6_rule(app_fw_flower, &payload);
+}
+
 static int
 nfp_flower_del_tun_neigh_v6(struct nfp_app_fw_flower *app_fw_flower,
 		uint8_t *ipv6)
@@ -2401,6 +2533,9 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr,
 	nfp_mac_idx = (find_entry->mac_index << 8) |
 			NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT |
 			NFP_TUN_PRE_TUN_IDX_BIT;
+	if (nfp_flow->tun.payload.v6_flag != 0)
+		nfp_mac_idx |= NFP_TUN_PRE_TUN_IPV6_BIT;
+
 	ret = nfp_flower_cmsg_tun_mac_rule(repr->app_fw_flower, &repr->mac_addr,
 			nfp_mac_idx, true);
 	if (ret != 0) {
@@ -3263,6 +3398,10 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 	rte_spinlock_init(&priv->ipv4_off_lock);
 	LIST_INIT(&priv->ipv4_off_list);
 
+	/* ipv6 off list */
+	rte_spinlock_init(&priv->ipv6_off_lock);
+	LIST_INIT(&priv->ipv6_off_list);
+
 	/* neighbor next list */
 	LIST_INIT(&priv->nn_list);
 
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index f536da2650..a6994e08ee 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -177,6 +177,12 @@ struct nfp_ipv4_addr_entry {
 	int ref_count;
 };
 
+struct nfp_ipv6_addr_entry {
+	LIST_ENTRY(nfp_ipv6_addr_entry) next;
+	uint8_t ipv6_addr[16];
+	int ref_count;
+};
+
 #define NFP_TUN_PRE_TUN_RULE_LIMIT  32
 
 struct nfp_flow_priv {
@@ -201,6 +207,9 @@ struct nfp_flow_priv {
 	/* IPv4 off */
 	LIST_HEAD(, nfp_ipv4_addr_entry) ipv4_off_list; /**< Store ipv4 off */
 	rte_spinlock_t ipv4_off_lock; /**< Lock the ipv4 off list */
+	/* IPv6 off */
+	LIST_HEAD(, nfp_ipv6_addr_entry) ipv6_off_list; /**< Store ipv6 off */
+	rte_spinlock_t ipv6_off_lock; /**< Lock the ipv6 off list */
 	/* neighbor next */
 	LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */
 };
-- 
2.29.3



Thread overview: 88+ messages
2022-10-18  3:22 [PATCH 00/25] add the extend rte_flow offload support of nfp PMD Chaoyong He
2022-10-18  3:22 ` [PATCH 01/25] net/nfp: add the offload support of IPv4 VXLAN item Chaoyong He
2022-10-18  3:22 ` [PATCH 02/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 03/25] net/nfp: prepare for the encap action of IPv4 tunnel Chaoyong He
2022-10-18  3:22 ` [PATCH 04/25] net/nfp: prepare for the encap action of IPv6 tunnel Chaoyong He
2022-10-18  3:22 ` [PATCH 05/25] net/nfp: add the offload support of IPv4 VXLAN encap action Chaoyong He
2022-10-18  3:22 ` [PATCH 06/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 07/25] net/nfp: prepare for the decap action of IPv4 UDP tunnel Chaoyong He
2022-10-18  3:22 ` [PATCH 08/25] net/nfp: prepare for the decap action of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 09/25] net/nfp: add the offload support of IPv4 VXLAN decap action Chaoyong He
2022-10-18  3:22 ` [PATCH 10/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 11/25] net/nfp: add the offload support of IPv4 GENEVE encap action Chaoyong He
2022-10-18  3:22 ` [PATCH 12/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 13/25] net/nfp: add the offload support of IPv4 GENEVE item Chaoyong He
2022-10-18  3:22 ` [PATCH 14/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 15/25] net/nfp: add the offload support of IPv4 GENEVE decap action Chaoyong He
2022-10-18  3:22 ` [PATCH 16/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 17/25] net/nfp: add the offload support of IPv4 NVGRE encap action Chaoyong He
2022-10-18  3:22 ` [PATCH 18/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 19/25] net/nfp: prepare for the decap action of IPv4 GRE tunnel Chaoyong He
2022-10-18  3:22 ` [PATCH 20/25] net/nfp: prepare for the decap action of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 21/25] net/nfp: add the offload support of IPv4 NVGRE item Chaoyong He
2022-10-18  3:22 ` [PATCH 22/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 23/25] net/nfp: add the offload support of IPv4 NVGRE decap action Chaoyong He
2022-10-18  3:22 ` [PATCH 24/25] net/nfp: add the offload support of IPv6 " Chaoyong He
2022-10-18  3:22 ` [PATCH 25/25] net/nfp: add the support of new tunnel solution Chaoyong He
2022-10-21 13:37 ` [PATCH 00/25] add the extend rte_flow offload support of nfp PMD Ferruh Yigit
2022-10-21 13:39   ` Ferruh Yigit
2022-10-22  8:24 ` [PATCH v2 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 01/25] net/nfp: support IPv4 VXLAN flow item Chaoyong He
2022-10-22  8:24   ` [PATCH v2 02/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 03/25] net/nfp: prepare for IPv4 tunnel encap flow action Chaoyong He
2022-10-22  8:24   ` [PATCH v2 04/25] net/nfp: prepare for IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 05/25] net/nfp: support IPv4 VXLAN " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 06/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 07/25] net/nfp: prepare for IPv4 UDP tunnel decap " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 08/25] net/nfp: prepare for IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 09/25] net/nfp: support IPv4 VXLAN " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 10/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 11/25] net/nfp: support IPv4 GENEVE encap " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 12/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 13/25] net/nfp: support IPv4 GENEVE flow item Chaoyong He
2022-10-22  8:24   ` [PATCH v2 14/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 15/25] net/nfp: support IPv4 GENEVE decap flow action Chaoyong He
2022-10-22  8:24   ` [PATCH v2 16/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 17/25] net/nfp: support IPv4 NVGRE encap " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 18/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 19/25] net/nfp: prepare for IPv4 GRE tunnel decap " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 20/25] net/nfp: prepare for IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 21/25] net/nfp: support IPv4 NVGRE flow item Chaoyong He
2022-10-22  8:24   ` [PATCH v2 22/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 23/25] net/nfp: support IPv4 NVGRE decap flow action Chaoyong He
2022-10-22  8:24   ` [PATCH v2 24/25] net/nfp: support IPv6 " Chaoyong He
2022-10-22  8:24   ` [PATCH v2 25/25] net/nfp: support new tunnel solution Chaoyong He
2022-10-24 15:09     ` Ferruh Yigit
2022-10-25  1:44       ` Chaoyong He
2022-10-25  8:18         ` Ferruh Yigit
2022-10-24 15:07   ` [PATCH v2 00/25] add the extend rte_flow offload support of nfp PMD Ferruh Yigit
2022-10-25  3:17     ` Chaoyong He
2022-10-25  3:29       ` Chaoyong He
2022-10-25  7:58   ` [PATCH v3 00/26] " Chaoyong He
2022-10-25  7:58     ` [PATCH v3 01/26] net/nfp: fix the app stuck by CPP bridge service Chaoyong He
2022-10-25  7:58     ` [PATCH v3 02/26] net/nfp: support IPv4 VXLAN flow item Chaoyong He
2022-10-25  7:58     ` [PATCH v3 03/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:58     ` [PATCH v3 04/26] net/nfp: prepare for IPv4 tunnel encap flow action Chaoyong He
2022-10-25  7:58     ` [PATCH v3 05/26] net/nfp: prepare for IPv6 " Chaoyong He
2022-10-25  7:58     ` [PATCH v3 06/26] net/nfp: support IPv4 VXLAN " Chaoyong He
2022-10-25  7:58     ` [PATCH v3 07/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 08/26] net/nfp: prepare for IPv4 UDP tunnel decap " Chaoyong He
2022-10-25  7:59     ` Chaoyong He [this message]
2022-10-25  7:59     ` [PATCH v3 10/26] net/nfp: support IPv4 VXLAN " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 11/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 12/26] net/nfp: support IPv4 GENEVE encap " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 13/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 14/26] net/nfp: support IPv4 GENEVE flow item Chaoyong He
2022-10-25  7:59     ` [PATCH v3 15/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 16/26] net/nfp: support IPv4 GENEVE decap flow action Chaoyong He
2022-10-25  7:59     ` [PATCH v3 17/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 18/26] net/nfp: support IPv4 NVGRE encap " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 19/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 20/26] net/nfp: prepare for IPv4 GRE tunnel decap " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 21/26] net/nfp: prepare for IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 22/26] net/nfp: support IPv4 NVGRE flow item Chaoyong He
2022-10-25  7:59     ` [PATCH v3 23/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 24/26] net/nfp: support IPv4 NVGRE decap flow action Chaoyong He
2022-10-25  7:59     ` [PATCH v3 25/26] net/nfp: support IPv6 " Chaoyong He
2022-10-25  7:59     ` [PATCH v3 26/26] net/nfp: support new solution for tunnel decap action Chaoyong He
2022-10-25 11:42     ` [PATCH v3 00/26] add the extend rte_flow offload support of nfp PMD Ferruh Yigit
