DPDK patches and discussions
* [PATCH 0/4] support offload of simple conntrack flow rules
@ 2023-09-30 10:00 Chaoyong He
  2023-09-30 10:00 ` [PATCH 1/4] net/nfp: prepare for the flow merge Chaoyong He
                   ` (5 more replies)
  0 siblings, 6 replies; 12+ messages in thread
From: Chaoyong He @ 2023-09-30 10:00 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

This patch series adds support for offloading simple conntrack flow
rules through the flower firmware, by importing the needed data
structures and flow merge logic.
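
As background: in this offload model, an application expresses one
tracked connection as two rte_flow rules, a "pre-ct" rule that sends
traffic through the conntrack module and a "post-ct" rule that matches
on the resulting connection state. The driver merges such a pair into
a single rule the firmware can handle. A minimal sketch of a rule pair
using the generic rte_flow API follows; the port, group, state flag
and the omitted conntrack action configuration are hypothetical, and
the driver-side interpretation of the conntrack item spec is defined
in patch 2:

  #include <rte_flow.h>

  static int
  setup_ct_rule_pair(uint16_t port_id)
  {
  	struct rte_flow_error error;
  	struct rte_flow_attr attr = { .ingress = 1 };
  	struct rte_flow_action_jump jump = { .group = 1 };
  	struct rte_flow_action_port_id out = { .id = 0 };
  	struct rte_flow_item_conntrack post_spec = {
  		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
  	};

  	/* Pre-ct rule: run matching packets through conntrack. */
  	struct rte_flow_item pre_items[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action pre_actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_CONNTRACK },
  		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	/* Post-ct rule: forward packets of tracked, valid connections. */
  	struct rte_flow_item post_items[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_CONNTRACK, .spec = &post_spec },
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action post_actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &out },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};

  	if (rte_flow_create(port_id, &attr, pre_items, pre_actions, &error) == NULL)
  		return -1;
  	if (rte_flow_create(port_id, &attr, post_items, post_actions, &error) == NULL)
  		return -1;

  	return 0;
  }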

Chaoyong He (4):
  net/nfp: prepare for the flow merge
  net/nfp: add infrastructure for ct flow merge
  net/nfp: add call to add and delete the flows to firmware
  net/nfp: add support for merged flows and conntrack stats

 drivers/net/nfp/flower/nfp_conntrack.c | 1776 ++++++++++++++++++++++++
 drivers/net/nfp/flower/nfp_conntrack.h |   40 +
 drivers/net/nfp/meson.build            |    1 +
 drivers/net/nfp/nfp_flow.c             |  177 ++-
 drivers/net/nfp/nfp_flow.h             |   38 +
 5 files changed, 2003 insertions(+), 29 deletions(-)
 create mode 100644 drivers/net/nfp/flower/nfp_conntrack.c
 create mode 100644 drivers/net/nfp/flower/nfp_conntrack.h

-- 
2.39.1



* [PATCH 1/4] net/nfp: prepare for the flow merge
  2023-09-30 10:00 [PATCH 0/4] support offload of simple conntrack flow rules Chaoyong He
@ 2023-09-30 10:00 ` Chaoyong He
  2023-09-30 10:00 ` [PATCH 2/4] net/nfp: add infrastructure for ct " Chaoyong He
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 12+ messages in thread
From: Chaoyong He @ 2023-09-30 10:00 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

Move data structures and macros from the source file to the header
file, and export the needed functions through the header file.

Add two more parameters to 'nfp_flow_process()' to prepare for the
flow merge:
The 'cookie' becomes a parameter because the flow merge logic needs
access to it.
The 'install' parameter is needed because, in the flow merge case,
some flows do not need to be installed to the hardware.
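
For illustration, a compile-only caller of the reworked function could
look as below (hypothetical helper written only for this commit
message; the prototype matches the header change in this patch):

  /*
   * Compile a flow with a caller-supplied cookie but skip the hardware
   * install, as the conntrack merge logic of later commits will do.
   */
  static struct rte_flow *
  ct_compile_only(struct nfp_flower_representor *repr,
  		const struct rte_flow_item items[],
  		const struct rte_flow_action actions[],
  		uint64_t cookie)
  {
  	return nfp_flow_process(repr, items, actions,
  			false,    /* validate_flag: create, not validate */
  			cookie,   /* stored as the host cookie in metadata */
  			false);   /* install_flag: not offloaded by itself */
  }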

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/nfp_flow.c | 42 +++++++++++++++++---------------------
 drivers/net/nfp/nfp_flow.h | 31 ++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 23 deletions(-)

diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index aa286535f7..1bb93bcfb5 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -89,17 +89,6 @@
 /* Tunnel ports */
 #define NFP_FL_PORT_TYPE_TUN            0x50000000
 
-/*
- * Maximum number of items in struct rte_flow_action_vxlan_encap.
- * ETH / IPv4(6) / UDP / VXLAN / END
- */
-#define ACTION_VXLAN_ENCAP_ITEMS_NUM 5
-
-struct vxlan_data {
-	struct rte_flow_action_vxlan_encap conf;
-	struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
-};
-
 /* Static initializer for a list of subsequent item types */
 #define NEXT_ITEM(...) \
 	((const enum rte_flow_item_type []){ \
@@ -359,7 +348,7 @@ nfp_check_mask_remove(struct nfp_flow_priv *priv,
 	return true;
 }
 
-static int
+int
 nfp_flow_table_add(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow)
 {
@@ -440,7 +429,7 @@ nfp_flow_alloc(struct nfp_fl_key_ls *key_layer, uint32_t port_id)
 	return NULL;
 }
 
-static void
+void
 nfp_flow_free(struct rte_flow *nfp_flow)
 {
 	rte_free(nfp_flow->payload.meta);
@@ -721,7 +710,8 @@ static void
 nfp_flow_compile_metadata(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow,
 		struct nfp_fl_key_ls *key_layer,
-		uint32_t stats_ctx)
+		uint32_t stats_ctx,
+		uint64_t cookie)
 {
 	struct nfp_fl_rule_metadata *nfp_flow_meta;
 	char *mbuf_off_exact;
@@ -737,7 +727,7 @@ nfp_flow_compile_metadata(struct nfp_flow_priv *priv,
 	nfp_flow_meta->act_len      = key_layer->act_size >> NFP_FL_LW_SIZ;
 	nfp_flow_meta->flags        = 0;
 	nfp_flow_meta->host_ctx_id  = rte_cpu_to_be_32(stats_ctx);
-	nfp_flow_meta->host_cookie  = rte_rand();
+	nfp_flow_meta->host_cookie  = rte_cpu_to_be_64(cookie);
 	nfp_flow_meta->flow_version = rte_cpu_to_be_64(priv->flower_version);
 
 	mbuf_off_exact = nfp_flow->payload.unmasked_data;
@@ -1958,7 +1948,7 @@ nfp_flow_is_tun_item(const struct rte_flow_item *item)
 	return false;
 }
 
-static bool
+bool
 nfp_flow_inner_item_get(const struct rte_flow_item items[],
 		const struct rte_flow_item **inner_item)
 {
@@ -3650,11 +3640,13 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
 	return 0;
 }
 
-static struct rte_flow *
+struct rte_flow *
 nfp_flow_process(struct nfp_flower_representor *representor,
 		const struct rte_flow_item items[],
 		const struct rte_flow_action actions[],
-		bool validate_flag)
+		bool validate_flag,
+		uint64_t cookie,
+		bool install_flag)
 {
 	int ret;
 	char *hash_data;
@@ -3690,9 +3682,9 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 		goto free_stats;
 	}
 
-	nfp_flow->install_flag = true;
+	nfp_flow->install_flag = install_flag;
 
-	nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx);
+	nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx, cookie);
 
 	ret = nfp_flow_compile_items(representor, items, nfp_flow);
 	if (ret != 0) {
@@ -3755,6 +3747,8 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 		__rte_unused struct rte_flow_error *error,
 		bool validate_flag)
 {
+	uint64_t cookie;
+
 	if (attr->group != 0)
 		PMD_DRV_LOG(INFO, "Pretend we support group attribute.");
 
@@ -3764,10 +3758,12 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 	if (attr->transfer != 0)
 		PMD_DRV_LOG(INFO, "Pretend we support transfer attribute.");
 
-	return nfp_flow_process(representor, items, actions, validate_flag);
+	cookie = rte_rand();
+
+	return nfp_flow_process(representor, items, actions, validate_flag, cookie, true);
 }
 
-static int
+int
 nfp_flow_teardown(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow,
 		bool validate_flag)
@@ -3895,7 +3891,7 @@ nfp_flow_create(struct rte_eth_dev *dev,
 	return NULL;
 }
 
-static int
+int
 nfp_flow_destroy(struct rte_eth_dev *dev,
 		struct rte_flow *nfp_flow,
 		struct rte_flow_error *error)
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index 7ce7f62453..817eaecba2 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -11,6 +11,17 @@
 /* The firmware expects lengths in units of long words */
 #define NFP_FL_LW_SIZ                   2
 
+/*
+ * Maximum number of items in struct rte_flow_action_vxlan_encap.
+ * ETH / IPv4(6) / UDP / VXLAN / END
+ */
+#define ACTION_VXLAN_ENCAP_ITEMS_NUM 5
+
+struct vxlan_data {
+	struct rte_flow_action_vxlan_encap conf;
+	struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
+};
+
 enum nfp_flower_tun_type {
 	NFP_FL_TUN_NONE   = 0,
 	NFP_FL_TUN_GRE    = 1,
@@ -153,8 +164,28 @@ struct rte_flow {
 	enum nfp_flow_type type;
 };
 
+/* Forward declaration */
+struct nfp_flower_representor;
+
 int nfp_flow_priv_init(struct nfp_pf_dev *pf_dev);
 void nfp_flow_priv_uninit(struct nfp_pf_dev *pf_dev);
 int nfp_net_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
+bool nfp_flow_inner_item_get(const struct rte_flow_item items[],
+		const struct rte_flow_item **inner_item);
+struct rte_flow *nfp_flow_process(struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		bool validate_flag,
+		uint64_t cookie,
+		bool install_flag);
+int nfp_flow_table_add(struct nfp_flow_priv *priv,
+		struct rte_flow *nfp_flow);
+int nfp_flow_teardown(struct nfp_flow_priv *priv,
+		struct rte_flow *nfp_flow,
+		bool validate_flag);
+void nfp_flow_free(struct rte_flow *nfp_flow);
+int nfp_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *nfp_flow,
+		struct rte_flow_error *error);
 
 #endif /* _NFP_FLOW_H_ */
-- 
2.39.1



* [PATCH 2/4] net/nfp: add infrastructure for ct flow merge
  2023-09-30 10:00 [PATCH 0/4] support offload of simple conntrack flow rules Chaoyong He
  2023-09-30 10:00 ` [PATCH 1/4] net/nfp: prepare for the flow merge Chaoyong He
@ 2023-09-30 10:00 ` Chaoyong He
  2023-09-30 10:00 ` [PATCH 3/4] net/nfp: add call to add and delete the flows to firmware Chaoyong He
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 12+ messages in thread
From: Chaoyong He @ 2023-09-30 10:00 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

Add the logic to merge the items and actions of a pre_ct flow and a
post_ct flow. The result is stored in a field of the merged flow
entry.
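
A worked example of the merge semantics (the rule contents are
hypothetical; the item/action counting matches nfp_ct_do_flow_merge()
in the diff below):

  pre_ct:  items   = ETH / IPV4 / END              (items_cnt   = 3)
           actions = CONNTRACK / JUMP / END        (actions_cnt = 3)
  post_ct: items   = ETH / TCP / END               (items_cnt   = 3)
           actions = PORT_ID / END                 (actions_cnt = 2)

  merged:  items   = ETH (spec and mask ORed) / IPV4 / TCP / END
           actions = PORT_ID / END

  items_cnt   = 3 + 3 - 1 (shared ETH) - 1 (duplicate END) = 4
  actions_cnt = 3 + 2 - 2 (CONNTRACK, JUMP dropped) - 1 (duplicate END) = 2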

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/flower/nfp_conntrack.c | 1626 ++++++++++++++++++++++++
 drivers/net/nfp/flower/nfp_conntrack.h |   32 +
 drivers/net/nfp/meson.build            |    1 +
 drivers/net/nfp/nfp_flow.c             |   56 +
 drivers/net/nfp/nfp_flow.h             |    4 +
 5 files changed, 1719 insertions(+)
 create mode 100644 drivers/net/nfp/flower/nfp_conntrack.c
 create mode 100644 drivers/net/nfp/flower/nfp_conntrack.h

diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
new file mode 100644
index 0000000000..24762de133
--- /dev/null
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -0,0 +1,1626 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include "nfp_conntrack.h"
+
+#include <rte_malloc.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+
+#include "../nfp_flow.h"
+#include "../nfp_logs.h"
+#include "nfp_flower_representor.h"
+
+struct ct_data {
+	uint8_t  ct_state;        /* Connection state. */
+	uint16_t ct_zone;         /* Connection zone. */
+};
+
+enum ct_entry_type {
+	CT_TYPE_PRE_CT,
+	CT_TYPE_POST_CT,
+};
+
+struct nfp_initial_flow {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint8_t items_cnt;
+	uint8_t actions_cnt;
+};
+
+struct nfp_ct_flow_entry {
+	uint64_t cookie;
+	LIST_ENTRY(nfp_ct_flow_entry) pre_ct_list;
+	LIST_ENTRY(nfp_ct_flow_entry) post_ct_list;
+	LIST_HEAD(, nfp_ct_merge_entry) children;
+	enum ct_entry_type type;
+	struct nfp_flower_representor *repr;
+	struct nfp_ct_zone_entry *ze;
+	struct nfp_initial_flow rule;
+};
+
+struct nfp_ct_map_entry {
+	uint64_t cookie;
+	struct nfp_ct_flow_entry *fe;
+};
+
+struct nfp_ct_zone_entry {
+	uint32_t zone;
+	struct nfp_flow_priv *priv;
+	LIST_HEAD(, nfp_ct_flow_entry) pre_ct_list;
+	LIST_HEAD(, nfp_ct_flow_entry) post_ct_list;
+	struct rte_hash *ct_merge_table;
+};
+
+struct nfp_ct_merge_entry {
+	uint64_t cookie[2];
+	LIST_ENTRY(nfp_ct_merge_entry) pre_ct_list;
+	LIST_ENTRY(nfp_ct_merge_entry) post_ct_list;
+	struct nfp_initial_flow rule;
+	struct nfp_ct_zone_entry *ze;
+	struct nfp_ct_flow_entry *pre_ct_parent;
+	struct nfp_ct_flow_entry *post_ct_parent;
+};
+
+/* OVS_KEY_ATTR_CT_STATE flags */
+#define OVS_CS_F_NEW            0x01 /* Beginning of a new connection. */
+#define OVS_CS_F_ESTABLISHED    0x02 /* Part of an existing connection. */
+#define OVS_CS_F_RELATED        0x04 /* Related to an established connection. */
+#define OVS_CS_F_REPLY_DIR      0x08 /* Flow is in the reply direction. */
+#define OVS_CS_F_INVALID        0x10 /* Could not track connection. */
+#define OVS_CS_F_TRACKED        0x20 /* Conntrack has occurred. */
+#define OVS_CS_F_SRC_NAT        0x40 /* Packet's source address/port was mangled by NAT. */
+#define OVS_CS_F_DST_NAT        0x80 /* Packet's destination address/port was mangled by NAT. */
+
+typedef void (*nfp_action_free_fn)(void *field);
+typedef bool (*nfp_action_copy_fn)(const void *src, void *dst);
+
+static bool
+is_pre_ct_flow(const struct ct_data *ct,
+		const struct rte_flow_action *actions)
+{
+	const struct rte_flow_action *action;
+
+	if (ct == NULL)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
+		if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK)
+			return true;
+	}
+
+	return false;
+}
+
+static bool
+is_post_ct_flow(const struct ct_data *ct)
+{
+	if (ct == NULL)
+		return false;
+
+	if ((ct->ct_state & OVS_CS_F_ESTABLISHED) != 0)
+		return true;
+
+	return false;
+}
+
+static bool
+is_ct_commit_flow(const struct ct_data *ct)
+{
+	if (ct == NULL)
+		return false;
+
+	if ((ct->ct_state & OVS_CS_F_NEW) != 0)
+		return true;
+
+	return false;
+}
+
+static struct nfp_ct_merge_entry *
+nfp_ct_merge_table_search(struct nfp_ct_zone_entry *ze,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int index;
+	uint32_t hash_key;
+	struct nfp_ct_merge_entry *m_ent;
+
+	hash_key = rte_jhash(hash_data, hash_len, ze->priv->hash_seed);
+	index = rte_hash_lookup_data(ze->ct_merge_table, &hash_key, (void **)&m_ent);
+	if (index < 0) {
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_merge table");
+		return NULL;
+	}
+
+	return m_ent;
+}
+
+static bool
+nfp_ct_merge_table_add(struct nfp_ct_zone_entry *ze,
+		struct nfp_ct_merge_entry *merge_entry)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(merge_entry, sizeof(uint64_t) * 2, ze->priv->hash_seed);
+	ret = rte_hash_add_key_data(ze->ct_merge_table, &hash_key, merge_entry);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add to ct_merge table failed");
+		return false;
+	}
+
+	return true;
+}
+
+static void
+nfp_ct_merge_table_delete(struct nfp_ct_zone_entry *ze,
+		struct nfp_ct_merge_entry *m_ent)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(m_ent, sizeof(uint64_t) * 2, ze->priv->hash_seed);
+	ret = rte_hash_del_key(ze->ct_merge_table, &hash_key);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "Delete from ct_merge table failed, ret=%d", ret);
+}
+
+static void
+nfp_ct_merge_entry_destroy(struct nfp_ct_merge_entry *m_ent)
+{
+	struct nfp_ct_zone_entry *ze;
+
+	ze = m_ent->ze;
+	nfp_ct_merge_table_delete(ze, m_ent);
+
+	rte_free(m_ent->rule.actions);
+	rte_free(m_ent->rule.items);
+	LIST_REMOVE(m_ent, pre_ct_list);
+	LIST_REMOVE(m_ent, post_ct_list);
+	rte_free(m_ent);
+}
+
+struct nfp_ct_map_entry *
+nfp_ct_map_table_search(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int index;
+	uint32_t hash_key;
+	struct nfp_ct_map_entry *me;
+
+	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+	index = rte_hash_lookup_data(priv->ct_map_table, &hash_key, (void **)&me);
+	if (index < 0) {
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_map table");
+		return NULL;
+	}
+
+	return me;
+}
+
+static bool
+nfp_ct_map_table_add(struct nfp_flow_priv *priv,
+		struct nfp_ct_map_entry *me)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed);
+	ret = rte_hash_add_key_data(priv->ct_map_table, &hash_key, me);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add to ct_map table failed");
+		return false;
+	}
+
+	return true;
+}
+
+static void
+nfp_ct_map_table_delete(struct nfp_flow_priv *priv,
+		struct nfp_ct_map_entry *me)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed);
+	ret = rte_hash_del_key(priv->ct_map_table, &hash_key);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "Delete from ct_map table failed");
+}
+
+static void
+nfp_ct_map_entry_destroy(struct nfp_ct_map_entry *me)
+{
+	rte_free(me);
+}
+
+static void
+nfp_ct_flow_item_free_real(void *field,
+		enum rte_flow_item_type type)
+{
+	switch (type) {
+	case RTE_FLOW_ITEM_TYPE_VOID:
+		break;
+	case RTE_FLOW_ITEM_TYPE_ETH:        /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_VLAN:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_MPLS:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_IPV4:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_IPV6:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_TCP:        /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_UDP:        /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_SCTP:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_ICMP:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_ICMP6:      /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_VXLAN:      /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_GRE:        /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_GRE_KEY:    /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		rte_free(field);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+nfp_ct_flow_item_free(struct rte_flow_item *item)
+{
+	if (item->spec != NULL)
+		nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->spec, item->type);
+
+	if (item->mask != NULL)
+		nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->mask, item->type);
+
+	if (item->last != NULL)
+		nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->last, item->type);
+}
+
+static void
+nfp_ct_flow_items_free(struct rte_flow_item *items,
+		uint8_t item_cnt)
+{
+	uint8_t loop;
+
+	for (loop = 0; loop < item_cnt; ++loop)
+		nfp_ct_flow_item_free(items + loop);
+}
+
+static bool
+nfp_flow_item_conf_size_get(enum rte_flow_item_type type,
+		size_t *size)
+{
+	size_t len = 0;
+
+	switch (type) {
+	case RTE_FLOW_ITEM_TYPE_VOID:
+		break;
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		len = sizeof(struct rte_flow_item_eth);
+		break;
+	case RTE_FLOW_ITEM_TYPE_VLAN:
+		len = sizeof(struct rte_flow_item_vlan);
+		break;
+	case RTE_FLOW_ITEM_TYPE_MPLS:
+		len = sizeof(struct rte_flow_item_mpls);
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		len = sizeof(struct rte_flow_item_ipv4);
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		len = sizeof(struct rte_flow_item_ipv6);
+		break;
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		len = sizeof(struct rte_flow_item_tcp);
+		break;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		len = sizeof(struct rte_flow_item_udp);
+		break;
+	case RTE_FLOW_ITEM_TYPE_SCTP:
+		len = sizeof(struct rte_flow_item_sctp);
+		break;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		len = sizeof(struct rte_flow_item_vxlan);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GRE:
+		len = sizeof(struct rte_flow_item_gre);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+		len = sizeof(rte_be32_t);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		len = sizeof(struct rte_flow_item_geneve);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type: %d", type);
+		return false;
+	}
+
+	*size = len;
+
+	return true;
+}
+
+static void *
+nfp_ct_flow_item_copy_real(const void *src,
+		enum rte_flow_item_type type)
+{
+	bool ret;
+	void *dst;
+	size_t len;
+
+	ret = nfp_flow_item_conf_size_get(type, &len);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Get flow item conf size failed");
+		return NULL;
+	}
+
+	dst = rte_zmalloc("flow_item", len, 0);
+	if (dst == NULL) {
+		PMD_DRV_LOG(ERR, "Malloc memory for ct item failed");
+		return NULL;
+	}
+
+	rte_memcpy(dst, src, len);
+
+	return dst;
+}
+
+static bool
+nfp_ct_flow_item_copy(const struct rte_flow_item *src,
+		struct rte_flow_item *dst)
+{
+	dst->type = src->type;
+
+	if (src->spec != NULL) {
+		dst->spec = nfp_ct_flow_item_copy_real(src->spec, src->type);
+		if (dst->spec == NULL) {
+			PMD_DRV_LOG(ERR, "Copy spec of ct item failed");
+			goto end;
+		}
+	}
+
+	if (src->mask != NULL) {
+		dst->mask = nfp_ct_flow_item_copy_real(src->mask, src->type);
+		if (dst->mask == NULL) {
+			PMD_DRV_LOG(ERR, "Copy mask of ct item failed");
+			goto free_spec;
+		}
+	}
+
+	if (src->last != NULL) {
+		dst->last = nfp_ct_flow_item_copy_real(src->last, src->type);
+		if (dst->last == NULL) {
+			PMD_DRV_LOG(ERR, "Copy last of ct item failed");
+			goto free_mask;
+		}
+	}
+
+	return true;
+
+free_mask:
+	nfp_ct_flow_item_free_real((void *)(ptrdiff_t)dst->mask, dst->type);
+free_spec:
+	nfp_ct_flow_item_free_real((void *)(ptrdiff_t)dst->spec, dst->type);
+end:
+	return false;
+}
+
+static bool
+nfp_ct_flow_items_copy(const struct rte_flow_item *src,
+		struct rte_flow_item *dst,
+		uint8_t item_cnt)
+{
+	bool ret;
+	uint8_t loop;
+
+	for (loop = 0; loop < item_cnt; ++loop) {
+		ret = nfp_ct_flow_item_copy(src + loop, dst + loop);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "Copy ct item failed");
+			nfp_ct_flow_items_free(dst, loop);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void
+nfp_ct_flow_action_free_real(void *field,
+		nfp_action_free_fn func)
+{
+	if (func != NULL)
+		func(field);
+
+	rte_free(field);
+}
+
+static void
+nfp_ct_flow_action_free_vxlan(void *field)
+{
+	struct vxlan_data *vxlan = field;
+
+	nfp_ct_flow_items_free(vxlan->items, ACTION_VXLAN_ENCAP_ITEMS_NUM);
+}
+
+static void
+nfp_ct_flow_action_free_raw(void *field)
+{
+	struct rte_flow_action_raw_encap *raw_encap = field;
+
+	rte_free(raw_encap->data);
+}
+
+static void
+nfp_ct_flow_action_free(struct rte_flow_action *action)
+{
+	nfp_action_free_fn func = NULL;
+
+	if (action->conf == NULL)
+		return;
+
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_VOID:           /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_MARK:           /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_DROP:           /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_COUNT:          /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_RSS:            /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_JUMP:           /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+		return;
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:        /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TTL:        /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:     /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+		break;
+	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		func = nfp_ct_flow_action_free_vxlan;
+		break;
+	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+		func = nfp_ct_flow_action_free_raw;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
+		break;
+	}
+
+	nfp_ct_flow_action_free_real((void *)(ptrdiff_t)action->conf, func);
+}
+
+static void
+nfp_ct_flow_actions_free(struct rte_flow_action *actions,
+		uint8_t action_cnt)
+{
+	uint8_t loop;
+
+	for (loop = 0; loop < action_cnt; ++loop)
+		nfp_ct_flow_action_free(actions + loop);
+}
+
+static void *
+nfp_ct_flow_action_copy_real(const void *src,
+		size_t len,
+		nfp_action_copy_fn func)
+{
+	bool ret;
+	void *dst;
+
+	dst = rte_zmalloc("flow_action", len, 0);
+	if (dst == NULL) {
+		PMD_DRV_LOG(ERR, "Malloc memory for ct action failed");
+		return NULL;
+	}
+
+	if (func != NULL) {
+		ret = func(src, dst);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "Copy ct action failed");
+			return NULL;
+		}
+
+		return dst;
+	}
+
+	rte_memcpy(dst, src, len);
+
+	return dst;
+}
+
+static bool
+nfp_ct_flow_action_copy_vxlan(const void *src,
+		void *dst)
+{
+	struct vxlan_data *vxlan_dst = dst;
+	const struct vxlan_data *vxlan_src = src;
+
+	vxlan_dst->conf.definition = vxlan_dst->items;
+	return nfp_ct_flow_items_copy(vxlan_src->items, vxlan_dst->items,
+			ACTION_VXLAN_ENCAP_ITEMS_NUM);
+}
+
+static bool
+nfp_ct_flow_action_copy_raw(const void *src,
+		void *dst)
+{
+	struct rte_flow_action_raw_encap *raw_dst = dst;
+	const struct rte_flow_action_raw_encap *raw_src = src;
+
+	raw_dst->size = raw_src->size;
+	raw_dst->data = nfp_ct_flow_action_copy_real(raw_src->data,
+			raw_src->size, NULL);
+	if (raw_dst->data == NULL) {
+		PMD_DRV_LOG(ERR, "Copy ct action process failed");
+		return false;
+	}
+
+	return true;
+}
+
+static bool
+nfp_ct_flow_action_copy(const struct rte_flow_action *src,
+		struct rte_flow_action *dst)
+{
+	size_t len;
+	nfp_action_copy_fn func = NULL;
+
+	dst->type = src->type;
+
+	if (src->conf == NULL)
+		return true;
+
+	switch (src->type) {
+	case RTE_FLOW_ACTION_TYPE_VOID:         /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_MARK:         /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_DROP:         /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_COUNT:        /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_RSS:          /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_JUMP:         /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+		return true;
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+		len = sizeof(struct rte_flow_action_set_mac);
+		break;
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:
+		len = sizeof(struct rte_flow_action_port_id);
+		break;
+	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+		len = sizeof(struct rte_flow_action_of_push_vlan);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+		len = sizeof(struct rte_flow_action_set_ipv4);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+		len = sizeof(struct rte_flow_action_set_dscp);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+		len = sizeof(struct rte_flow_action_set_ipv6);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_TTL:
+		len = sizeof(struct rte_flow_action_set_ttl);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+		len = sizeof(struct rte_flow_action_set_tp);
+		break;
+	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		len = sizeof(struct vxlan_data);
+		func = nfp_ct_flow_action_copy_vxlan;
+		break;
+	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+		len = sizeof(struct rte_flow_action_raw_encap);
+		func = nfp_ct_flow_action_copy_raw;
+		break;
+	default:
+		PMD_DRV_LOG(DEBUG, "Unsupported action type: %d", src->type);
+		return false;
+	}
+
+	dst->conf = nfp_ct_flow_action_copy_real(src->conf, len, func);
+	if (dst->conf == NULL) {
+		PMD_DRV_LOG(DEBUG, "Copy ct action process failed");
+		return false;
+	}
+
+	return true;
+}
+
+static bool
+nfp_ct_flow_actions_copy(const struct rte_flow_action *src,
+		struct rte_flow_action *dst,
+		uint8_t action_cnt)
+{
+	bool ret;
+	uint8_t loop;
+
+	for (loop = 0; loop < action_cnt; ++loop) {
+		ret = nfp_ct_flow_action_copy(src + loop, dst + loop);
+		if (!ret) {
+			PMD_DRV_LOG(DEBUG, "Copy ct action failed");
+			nfp_ct_flow_actions_free(dst, loop);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static struct nfp_ct_flow_entry *
+nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze,
+		struct nfp_flower_representor *repr,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		uint64_t cookie)
+{
+	bool ret;
+	uint8_t loop;
+	uint8_t item_cnt = 1;      /* Counts the RTE_FLOW_ITEM_TYPE_END */
+	uint8_t action_cnt = 1;    /* Counts the RTE_FLOW_ACTION_TYPE_END */
+	struct nfp_flow_priv *priv;
+	struct nfp_ct_map_entry *me;
+	struct nfp_ct_flow_entry *fe;
+
+	fe = rte_zmalloc("ct_flow_entry", sizeof(*fe), 0);
+	if (fe == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc ct_flow entry");
+		return NULL;
+	}
+
+	fe->ze = ze;
+	fe->repr = repr;
+	fe->cookie = cookie;
+	LIST_INIT(&fe->children);
+
+	for (loop = 0; (items + loop)->type != RTE_FLOW_ITEM_TYPE_END; loop++)
+		item_cnt++;
+	for (loop = 0; (actions + loop)->type != RTE_FLOW_ACTION_TYPE_END; loop++)
+		action_cnt++;
+
+	fe->rule.items = rte_zmalloc("ct_flow_item",
+			sizeof(struct rte_flow_item) * item_cnt, 0);
+	if (fe->rule.items == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc ct flow items");
+		goto free_flow_entry;
+	}
+
+	fe->rule.actions = rte_zmalloc("ct_flow_action",
+			sizeof(struct rte_flow_action) * action_cnt, 0);
+	if (fe->rule.actions == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc ct flow actions");
+		goto free_flow_item;
+	}
+
+	/* Deep copy of items */
+	ret = nfp_ct_flow_items_copy(items, fe->rule.items, item_cnt);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Could not deep copy ct flow items");
+		goto free_flow_action;
+	}
+
+	/* Deep copy of actions */
+	ret = nfp_ct_flow_actions_copy(actions, fe->rule.actions, action_cnt);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Could not deep copy ct flow actions");
+		goto free_copied_items;
+	}
+
+	fe->rule.items_cnt = item_cnt;
+	fe->rule.actions_cnt = action_cnt;
+
+	/* Now add a ct map entry */
+	me = rte_zmalloc("ct_map_entry", sizeof(*me), 0);
+	if (me == NULL) {
+		PMD_DRV_LOG(ERR, "Malloc memory for ct map entry failed");
+		goto free_copied_actions;
+	}
+
+	me->cookie = fe->cookie;
+	me->fe = fe;
+
+	priv = repr->app_fw_flower->flow_priv;
+	ret = nfp_ct_map_table_add(priv, me);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Add into ct map table failed");
+		goto free_map_entry;
+	}
+
+	return fe;
+
+free_map_entry:
+	nfp_ct_map_entry_destroy(me);
+free_copied_actions:
+	nfp_ct_flow_actions_free(fe->rule.actions, action_cnt);
+free_copied_items:
+	nfp_ct_flow_items_free(fe->rule.items, item_cnt);
+free_flow_action:
+	rte_free(fe->rule.actions);
+free_flow_item:
+	rte_free(fe->rule.items);
+free_flow_entry:
+	rte_free(fe);
+
+	return NULL;
+}
+
+static void
+nfp_flow_children_merge_free(struct nfp_ct_flow_entry *fe)
+{
+	struct nfp_ct_merge_entry *m_ent;
+
+	switch (fe->type) {
+	case CT_TYPE_PRE_CT:
+		LIST_FOREACH(m_ent, &fe->children, pre_ct_list)
+			nfp_ct_merge_entry_destroy(m_ent);
+		break;
+	case CT_TYPE_POST_CT:
+		LIST_FOREACH(m_ent, &fe->children, post_ct_list)
+			nfp_ct_merge_entry_destroy(m_ent);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+nfp_ct_flow_entry_destroy_partly(struct nfp_ct_flow_entry *fe)
+{
+	struct nfp_ct_map_entry *me;
+
+	if (!LIST_EMPTY(&fe->children))
+		nfp_flow_children_merge_free(fe);
+
+	me = nfp_ct_map_table_search(fe->ze->priv, (char *)&fe->cookie, sizeof(uint64_t));
+	if (me != NULL) {
+		nfp_ct_map_table_delete(fe->ze->priv, me);
+		nfp_ct_map_entry_destroy(me);
+	}
+
+	nfp_ct_flow_actions_free(fe->rule.actions, fe->rule.actions_cnt);
+	nfp_ct_flow_items_free(fe->rule.items, fe->rule.items_cnt);
+	rte_free(fe->rule.actions);
+	rte_free(fe->rule.items);
+	rte_free(fe);
+}
+
+static void
+nfp_ct_flow_entry_destroy(struct nfp_ct_flow_entry *fe)
+{
+	LIST_REMOVE(fe, pre_ct_list);
+	LIST_REMOVE(fe, post_ct_list);
+
+	nfp_ct_flow_entry_destroy_partly(fe);
+}
+
+static struct nfp_ct_zone_entry *
+nfp_ct_zone_table_search(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int index;
+	uint32_t hash_key;
+	struct nfp_ct_zone_entry *ze;
+
+	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+	index = rte_hash_lookup_data(priv->ct_zone_table, &hash_key, (void **)&ze);
+	if (index < 0) {
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_zone table");
+		return NULL;
+	}
+
+	return ze;
+}
+
+static bool
+nfp_ct_zone_table_add(struct nfp_flow_priv *priv,
+		struct nfp_ct_zone_entry *ze)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed);
+	ret = rte_hash_add_key_data(priv->ct_zone_table, &hash_key, ze);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add to the ct_zone table failed");
+		return false;
+	}
+
+	return true;
+}
+
+static void
+nfp_ct_zone_table_delete(struct nfp_flow_priv *priv,
+		struct nfp_ct_zone_entry *ze)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed);
+	ret = rte_hash_del_key(priv->ct_zone_table, &hash_key);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "Delete from the ct_zone table failed");
+}
+
+static bool
+nfp_ct_zone_entry_init(struct nfp_ct_zone_entry *ze,
+		struct nfp_flow_priv *priv,
+		uint32_t zone,
+		bool wildcard)
+{
+	char hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters ct_merge_hash_params = {
+		.entries    = 1000,
+		.hash_func  = rte_jhash,
+		.socket_id  = rte_socket_id(),
+		.key_len    = sizeof(uint32_t),
+		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+	};
+
+	if (wildcard) {
+		ct_merge_hash_params.name = "ct_wc_merge_table";
+	} else {
+		snprintf(hash_name, sizeof(hash_name), "ct_%u_merge_table", zone);
+		ct_merge_hash_params.name = hash_name;
+	}
+
+	ct_merge_hash_params.hash_func_init_val = priv->hash_seed;
+	ze->ct_merge_table = rte_hash_create(&ct_merge_hash_params);
+	if (ze->ct_merge_table == NULL) {
+		PMD_DRV_LOG(ERR, "ct merge table creation failed");
+		return false;
+	}
+
+	ze->zone = zone;
+	ze->priv = priv;
+	LIST_INIT(&ze->pre_ct_list);
+	LIST_INIT(&ze->post_ct_list);
+
+	return true;
+}
+
+static void
+nfp_ct_zone_entry_destroy(struct nfp_ct_zone_entry *ze)
+{
+	struct nfp_ct_flow_entry *fe;
+
+	if (ze == NULL)
+		return;
+
+	rte_hash_free(ze->ct_merge_table);
+
+	LIST_FOREACH(fe, &ze->pre_ct_list, pre_ct_list)
+		nfp_ct_flow_entry_destroy(fe);
+
+	LIST_FOREACH(fe, &ze->post_ct_list, post_ct_list)
+		nfp_ct_flow_entry_destroy(fe);
+
+	rte_free(ze);
+}
+
+static struct nfp_ct_zone_entry *
+nfp_ct_zone_entry_get(struct nfp_flow_priv *priv,
+		uint32_t zone,
+		bool wildcard)
+{
+	bool is_ok;
+	struct nfp_ct_zone_entry *ze;
+
+	if (wildcard) {
+		if (priv->ct_zone_wc != NULL)
+			return priv->ct_zone_wc;
+
+		ze = rte_zmalloc("ct_zone_wc", sizeof(*ze), 0);
+		if (ze == NULL) {
+			PMD_DRV_LOG(ERR, "Could not alloc ct_zone_wc entry");
+			return NULL;
+		}
+
+		is_ok = nfp_ct_zone_entry_init(ze, priv, zone, true);
+		if (!is_ok) {
+			PMD_DRV_LOG(ERR, "Init ct zone wc entry failed");
+			goto free_ct_zone_entry;
+		}
+
+		priv->ct_zone_wc = ze;
+	} else {
+		ze = nfp_ct_zone_table_search(priv, (char *)&zone, sizeof(uint32_t));
+		if (ze != NULL)
+			return ze;
+
+		ze = rte_zmalloc("ct_zone_entry", sizeof(*ze), 0);
+		if (ze == NULL) {
+			PMD_DRV_LOG(ERR, "Could not alloc ct_zone entry");
+			return NULL;
+		}
+
+		is_ok = nfp_ct_zone_entry_init(ze, priv, zone, false);
+		if (!is_ok) {
+			PMD_DRV_LOG(ERR, "Init ct zone entry failed");
+			goto free_ct_zone_entry;
+		}
+
+		is_ok = nfp_ct_zone_table_add(priv, ze);
+		if (!is_ok) {
+			PMD_DRV_LOG(ERR, "Add into ct zone table failed");
+			goto free_ct_zone_entry;
+		}
+	}
+
+	return ze;
+
+free_ct_zone_entry:
+	nfp_ct_zone_entry_destroy(ze);
+
+	return NULL;
+}
+
+static void
+nfp_ct_zone_entry_free(struct nfp_ct_zone_entry *ze,
+		bool wildcard)
+{
+	if (LIST_EMPTY(&ze->pre_ct_list) && LIST_EMPTY(&ze->post_ct_list)) {
+		if (!wildcard)
+			nfp_ct_zone_table_delete(ze->priv, ze);
+
+		nfp_ct_zone_entry_destroy(ze);
+	}
+}
+
+static inline bool
+is_item_check_pass(const struct rte_flow_item *item1,
+		const struct rte_flow_item *item2,
+		uint8_t *cnt_same)
+{
+	bool pass;
+	uint32_t i;
+	size_t size;
+	const char *key1 = item1->spec;
+	const char *key2 = item2->spec;
+	const char *mask1 = item1->mask;
+	const char *mask2 = item2->mask;
+
+	if (item1->type != item2->type)
+		return true;
+
+	pass = nfp_flow_item_conf_size_get(item1->type, &size);
+	if (!pass)
+		return false;
+
+	for (i = 0; i < size; i++) {
+		if ((key1[i] & mask1[i] & mask2[i]) ^ (key2[i] & mask1[i] & mask2[i]))
+			return false;
+	}
+
+	*cnt_same = *cnt_same + 1;
+
+	return true;
+}
+
+static bool
+nfp_ct_merge_items_check(struct rte_flow_item *items1,
+		struct rte_flow_item *items2,
+		uint8_t *cnt_same)
+{
+	bool pass;
+	bool is_tun_flow_1;
+	bool is_tun_flow_2;
+	const struct rte_flow_item *item1;
+	const struct rte_flow_item *item2;
+	const struct rte_flow_item *inner_item1 = NULL;
+	const struct rte_flow_item *inner_item2 = NULL;
+
+	is_tun_flow_1 = nfp_flow_inner_item_get(items1, &inner_item1);
+	is_tun_flow_2 = nfp_flow_inner_item_get(items2, &inner_item2);
+
+	if (is_tun_flow_1) {
+		if (is_tun_flow_2) {
+			/* Outer layer */
+			for (item1 = items1; item1 != inner_item1; item1++) {
+				for (item2 = items2; item2 != inner_item2; item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+			/* Inner layer */
+			for (item1 = inner_item1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) {
+				for (item2 = inner_item2; item2->type != RTE_FLOW_ITEM_TYPE_END;
+						item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+		} else {
+			for (item1 = items1; item1 != inner_item1; item1++) {
+				for (item2 = items2; item2->type != RTE_FLOW_ITEM_TYPE_END;
+						item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+		}
+	} else {
+		if (is_tun_flow_2) {
+			for (item1 = items1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) {
+				for (item2 = items2; item2 != inner_item2; item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+		} else {
+			for (item1 = items1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) {
+				for (item2 = items2; item2->type != RTE_FLOW_ITEM_TYPE_END;
+						item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+		}
+	}
+
+	return true;
+}
+
+static inline bool
+is_action_pattern_check_pass(struct rte_flow_item *items,
+		enum rte_flow_item_type type)
+{
+	struct rte_flow_item *item;
+
+	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == type)
+			return false;
+	}
+
+	return true;
+}
+
+static bool
+nfp_ct_merge_action_check(struct rte_flow_action *action,
+		struct rte_flow_item *items)
+{
+	bool pass = true;
+
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+		pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_ETH);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+		pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_IPV4);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+		pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_IPV6);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+		pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_UDP);
+		pass |= is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_TCP);
+		pass |= is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_SCTP);
+		break;
+	default:
+		break;
+	}
+
+	return pass;
+}
+
+static bool
+nfp_ct_merge_actions_check(struct rte_flow_action *actions,
+		struct rte_flow_item *items,
+		uint8_t *cnt_same)
+{
+	bool pass = true;
+	struct rte_flow_action *action;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:    /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:    /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:   /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:   /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:  /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:   /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:   /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:  /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:     /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+			pass = nfp_ct_merge_action_check(action, items);
+			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK: /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_JUMP:      /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_COUNT:     /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_DROP:      /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			*cnt_same = *cnt_same + 1;
+			break;
+		default:
+			pass = false;
+			break;
+		}
+	}
+
+	return pass;
+}
+
+static void
+nfp_ct_merge_item_real(const struct rte_flow_item *item_src,
+		struct rte_flow_item *item_dst)
+{
+	uint32_t i;
+	size_t size;
+	char *key_dst;
+	char *mask_dst;
+	const char *key_src;
+	const char *mask_src;
+
+	key_src = item_src->spec;
+	mask_src = item_src->mask;
+	key_dst = (char *)(ptrdiff_t)item_dst->spec;
+	mask_dst = (char *)(ptrdiff_t)item_dst->mask;
+	nfp_flow_item_conf_size_get(item_src->type, &size);
+
+	for (i = 0; i < size; i++) {
+		key_dst[i] |= key_src[i];
+		mask_dst[i] |= mask_src[i];
+	}
+}
+
+static bool
+nfp_ct_merge_item(uint32_t index,
+		const struct rte_flow_item *item1,
+		const struct rte_flow_item *item2_start,
+		const struct rte_flow_item *item2_end,
+		struct nfp_ct_merge_entry *merge_entry)
+{
+	struct rte_flow_item *item;
+	const struct rte_flow_item *item2;
+
+	/* Copy to the merged items */
+	item = &merge_entry->rule.items[index];
+	*item = *item1;
+
+	item2 = item2_start;
+	if (item2_end != NULL) {
+		for (; item2 != item2_end; item2++) {
+			if (item1->type == item2->type) {
+				nfp_ct_merge_item_real(item2, item);
+				return true;
+			}
+		}
+	} else {
+		for (; item2->type != RTE_FLOW_ITEM_TYPE_END; item2++) {
+			if (item1->type == item2->type) {
+				nfp_ct_merge_item_real(item2, item);
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static void
+nfp_ct_merge_items(struct nfp_ct_merge_entry *merge_entry)
+{
+	uint32_t index = 0;
+	bool is_tun_flow_1;
+	bool is_tun_flow_2;
+	struct rte_flow_item *items1;
+	struct rte_flow_item *items2;
+	struct rte_flow_item *merge_item;
+	const struct rte_flow_item *item;
+	const struct rte_flow_item *inner1 = NULL;
+	const struct rte_flow_item *inner2 = NULL;
+
+	items1 = merge_entry->pre_ct_parent->rule.items;
+	items2 = merge_entry->post_ct_parent->rule.items;
+	is_tun_flow_1 = nfp_flow_inner_item_get(items1, &inner1);
+	is_tun_flow_2 = nfp_flow_inner_item_get(items2, &inner2);
+
+	if (is_tun_flow_1) {
+		if (is_tun_flow_2) {
+			/* Outer layer */
+			for (item = items1; item != inner1; item++, index++) {
+				if (nfp_ct_merge_item(index, item, items2, inner2, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining outer layer items */
+			for (item = items2; item != inner2; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+
+			/* Inner layer */
+			for (item = inner1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				if (nfp_ct_merge_item(index, item, inner2, NULL, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining inner layer items */
+			for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+		} else {
+			for (item = items1; item != inner1; item++, index++) {
+				if (nfp_ct_merge_item(index, item, items2, NULL, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining items */
+			for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+
+			/* Copy the inner layer items */
+			for (item = inner1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+		}
+	} else {
+		if (is_tun_flow_2) {
+			for (item = items1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				if (nfp_ct_merge_item(index, item, items2, inner2, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining items */
+			for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+		} else {
+			for (item = items1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				if (nfp_ct_merge_item(index, item, items2, NULL, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining items */
+			for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+		}
+	}
+}
+
+static void
+nfp_ct_merge_actions(struct nfp_ct_merge_entry *merge_entry)
+{
+	struct rte_flow_action *action;
+	struct rte_flow_action *merge_actions;
+
+	merge_actions = merge_entry->rule.actions;
+
+	action = merge_entry->pre_ct_parent->rule.actions;
+	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK ||
+				action->type == RTE_FLOW_ACTION_TYPE_JUMP)
+			continue;
+
+		*merge_actions = *action;
+		merge_actions++;
+	}
+
+	action = merge_entry->post_ct_parent->rule.actions;
+	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		*merge_actions = *action;
+		merge_actions++;
+	}
+}
+
+static bool
+nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze,
+		struct nfp_ct_flow_entry *pre_ct_entry,
+		struct nfp_ct_flow_entry *post_ct_entry)
+{
+	bool ret;
+	uint64_t new_cookie[2];
+	uint8_t cnt_same_item = 0;
+	uint8_t cnt_same_action = 0;
+	struct nfp_ct_merge_entry *merge_entry;
+
+	if (pre_ct_entry->repr != post_ct_entry->repr)
+		return true;
+
+	ret = nfp_ct_merge_items_check(pre_ct_entry->rule.items,
+			post_ct_entry->rule.items, &cnt_same_item);
+	if (!ret)
+		return true;
+
+	ret = nfp_ct_merge_actions_check(pre_ct_entry->rule.actions,
+			post_ct_entry->rule.items, &cnt_same_action);
+	if (!ret)
+		return true;
+
+	new_cookie[0] = pre_ct_entry->cookie;
+	new_cookie[1] = post_ct_entry->cookie;
+	merge_entry = nfp_ct_merge_table_search(ze, (char *)&new_cookie, sizeof(uint64_t) * 2);
+	if (merge_entry != NULL)
+		return true;
+
+	merge_entry = rte_zmalloc("ct_merge_entry", sizeof(*merge_entry), 0);
+	if (merge_entry == NULL) {
+		PMD_DRV_LOG(ERR, "Malloc memory for ct merge entry failed");
+		return false;
+	}
+
+	merge_entry->ze = ze;
+	merge_entry->pre_ct_parent = pre_ct_entry;
+	merge_entry->post_ct_parent = post_ct_entry;
+	rte_memcpy(merge_entry->cookie, new_cookie, sizeof(new_cookie));
+	merge_entry->rule.items_cnt = pre_ct_entry->rule.items_cnt +
+			post_ct_entry->rule.items_cnt - cnt_same_item - 1;
+	merge_entry->rule.actions_cnt = pre_ct_entry->rule.actions_cnt +
+			post_ct_entry->rule.actions_cnt - cnt_same_action - 1;
+
+	merge_entry->rule.items = rte_zmalloc("ct_flow_item",
+			sizeof(struct rte_flow_item) * merge_entry->rule.items_cnt, 0);
+	if (merge_entry->rule.items == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc items for merged flow");
+		goto merge_exit;
+	}
+
+	merge_entry->rule.actions = rte_zmalloc("ct_flow_action",
+			sizeof(struct rte_flow_action) * merge_entry->rule.actions_cnt, 0);
+	if (merge_entry->rule.actions == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc actions for merged flow");
+		goto free_items;
+	}
+
+	nfp_ct_merge_items(merge_entry);
+	nfp_ct_merge_actions(merge_entry);
+
+	/* Add this entry to the pre_ct and post_ct lists */
+	LIST_INSERT_HEAD(&pre_ct_entry->children, merge_entry, pre_ct_list);
+	LIST_INSERT_HEAD(&post_ct_entry->children, merge_entry, post_ct_list);
+
+	ret = nfp_ct_merge_table_add(ze, merge_entry);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Add into ct merge table failed");
+		goto free_actions;
+	}
+
+	return true;
+
+free_actions:
+	LIST_REMOVE(merge_entry, post_ct_list);
+	LIST_REMOVE(merge_entry, pre_ct_list);
+	rte_free(merge_entry->rule.actions);
+free_items:
+	rte_free(merge_entry->rule.items);
+merge_exit:
+	rte_free(merge_entry);
+
+	return false;
+}
+
+static bool
+nfp_ct_merge_flow_entries(struct nfp_ct_flow_entry *fe,
+		struct nfp_ct_zone_entry *ze_src,
+		struct nfp_ct_zone_entry *ze_dst)
+{
+	bool ret;
+	struct nfp_ct_flow_entry *fe_tmp;
+
+	if (fe->type == CT_TYPE_PRE_CT) {
+		LIST_FOREACH(fe_tmp, &ze_src->post_ct_list, post_ct_list) {
+			ret = nfp_ct_do_flow_merge(ze_dst, fe, fe_tmp);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "Merge for ct pre flow failed");
+				return false;
+			}
+		}
+	} else {
+		LIST_FOREACH(fe_tmp, &ze_src->pre_ct_list, pre_ct_list) {
+			ret = nfp_ct_do_flow_merge(ze_dst, fe_tmp, fe);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "Merge for ct post flow failed");
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+static bool
+nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item,
+		struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		uint64_t cookie)
+{
+	bool ret;
+	struct nfp_flow_priv *priv;
+	struct nfp_ct_zone_entry *ze;
+	struct nfp_ct_flow_entry *fe;
+	const struct ct_data *ct = ct_item->spec;
+
+	priv = representor->app_fw_flower->flow_priv;
+	ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, false);
+	if (ze == NULL) {
+		PMD_DRV_LOG(ERR, "Could not get ct zone entry");
+		return false;
+	}
+
+	/* Add entry to pre_ct_list */
+	fe = nfp_ct_flow_entry_get(ze, representor, items, actions, cookie);
+	if (fe == NULL) {
+		PMD_DRV_LOG(ERR, "Could not get ct flow entry");
+		goto ct_zone_entry_free;
+	}
+
+	fe->type = CT_TYPE_PRE_CT;
+	LIST_INSERT_HEAD(&ze->pre_ct_list, fe, pre_ct_list);
+
+	ret = nfp_ct_merge_flow_entries(fe, ze, ze);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Merge ct flow entries failed");
+		goto ct_flow_entry_free;
+	}
+
+	/* Need to check and merge with tables in the wc_zone as well */
+	if (priv->ct_zone_wc != NULL) {
+		ret = nfp_ct_merge_flow_entries(fe, priv->ct_zone_wc, ze);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "Merge ct flow entries with wildcard zone failed");
+			goto ct_flow_entry_free;
+		}
+	}
+
+	/* The real offload logic comes in the next commit, so just return false for now */
+
+ct_flow_entry_free:
+	nfp_ct_flow_entry_destroy(fe);
+
+ct_zone_entry_free:
+	nfp_ct_zone_entry_free(ze, false);
+
+	return false;
+}
+
+static bool
+nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item,
+		struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		uint64_t cookie)
+{
+	bool ret = true;
+	void *next_data;
+	uint32_t iter = 0;
+	const void *next_key;
+	bool wildcard = false;
+	struct nfp_flow_priv *priv;
+	struct nfp_ct_zone_entry *ze;
+	struct nfp_ct_flow_entry *fe;
+	const struct ct_data *ct = ct_item->spec;
+	const struct ct_data *ct_mask = ct_item->mask;
+
+	if (ct_mask->ct_zone == 0) {
+		wildcard = true;
+	} else if (ct_mask->ct_zone != UINT16_MAX) {
+		PMD_DRV_LOG(ERR, "Partially wildcarded ct_zone is not supported");
+		return false;
+	}
+
+	priv = representor->app_fw_flower->flow_priv;
+	ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, wildcard);
+	if (ze == NULL) {
+		PMD_DRV_LOG(ERR, "Could not get ct zone entry");
+		return false;
+	}
+
+	/* Add entry to post_ct_list */
+	fe = nfp_ct_flow_entry_get(ze, representor, items, actions, cookie);
+	if (fe == NULL) {
+		PMD_DRV_LOG(ERR, "Could not get ct flow entry");
+		goto ct_zone_entry_free;
+	}
+
+	fe->type = CT_TYPE_POST_CT;
+	LIST_INSERT_HEAD(&ze->post_ct_list, fe, post_ct_list);
+
+	if (wildcard) {
+		while (rte_hash_iterate(priv->ct_zone_table, &next_key, &next_data, &iter) >= 0) {
+			ze = (struct nfp_ct_zone_entry *)next_data;
+			ret = nfp_ct_merge_flow_entries(fe, ze, ze);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "Merge ct flow entries with wildcard zone failed");
+				break;
+			}
+		}
+	} else {
+		ret = nfp_ct_merge_flow_entries(fe, ze, ze);
+	}
+
+	if (!ret)
+		goto ct_flow_entry_free;
+
+	/* The real offload logic comes in the next commit, so just return false for now */
+
+ct_flow_entry_free:
+	nfp_ct_flow_entry_destroy(fe);
+
+ct_zone_entry_free:
+	nfp_ct_zone_entry_free(ze, wildcard);
+
+	return false;
+}
+
+struct rte_flow *
+nfp_ct_flow_setup(struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		const struct rte_flow_item *ct_item,
+		bool validate_flag,
+		uint64_t cookie)
+{
+	const struct ct_data *ct;
+
+	if (ct_item == NULL)
+		return NULL;
+
+	ct = ct_item->spec;
+
+	if (is_ct_commit_flow(ct)) {
+		return nfp_flow_process(representor, &items[1], actions,
+				validate_flag, cookie, false);
+	}
+
+	if (is_post_ct_flow(ct)) {
+		if (nfp_flow_handle_post_ct(ct_item, representor, &items[1],
+				actions, cookie)) {
+			return nfp_flow_process(representor, &items[1], actions,
+					validate_flag, cookie, false);
+		}
+
+		PMD_DRV_LOG(ERR, "Handle nfp post ct flow failed.");
+		return NULL;
+	}
+
+	if (is_pre_ct_flow(ct, actions)) {
+		if (nfp_flow_handle_pre_ct(ct_item, representor, &items[1],
+				actions, cookie)) {
+			return nfp_flow_process(representor, &items[1], actions,
+					validate_flag, cookie, false);
+		}
+
+		PMD_DRV_LOG(ERR, "Handle nfp pre ct flow failed.");
+		return NULL;
+	}
+
+	PMD_DRV_LOG(ERR, "Unsupported ct flow type.");
+	return NULL;
+}
diff --git a/drivers/net/nfp/flower/nfp_conntrack.h b/drivers/net/nfp/flower/nfp_conntrack.h
new file mode 100644
index 0000000000..149a3eb040
--- /dev/null
+++ b/drivers/net/nfp/flower/nfp_conntrack.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CONNTRACK_H__
+#define __NFP_CONNTRACK_H__
+
+#include <stdbool.h>
+
+#include <rte_flow.h>
+
+#include "../nfp_flow.h"
+
+struct nfp_ct_map_entry;
+
+struct nfp_ct_zone_entry;
+
+struct nfp_ct_merge_entry;
+
+struct nfp_ct_map_entry *nfp_ct_map_table_search(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len);
+
+struct rte_flow *nfp_ct_flow_setup(struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		const struct rte_flow_item *ct_item,
+		bool validate_flag,
+		uint64_t cookie);
+
+#endif /* __NFP_CONNTRACK_H__ */
diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
index d422269c4b..e6a642da49 100644
--- a/drivers/net/nfp/meson.build
+++ b/drivers/net/nfp/meson.build
@@ -6,6 +6,7 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
     reason = 'only supported on 64-bit Linux'
 endif
 sources = files(
+        'flower/nfp_conntrack.c',
         'flower/nfp_flower.c',
         'flower/nfp_flower_cmsg.c',
         'flower/nfp_flower_ctrl.c',
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 1bb93bcfb5..16a5c7e055 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -10,6 +10,7 @@
 #include <rte_jhash.h>
 #include <rte_malloc.h>
 
+#include "flower/nfp_conntrack.h"
 #include "flower/nfp_flower_representor.h"
 #include "nfpcore/nfp_rtsym.h"
 #include "nfp_logs.h"
@@ -3748,6 +3749,8 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 		bool validate_flag)
 {
 	uint64_t cookie;
+	const struct rte_flow_item *item;
+	const struct rte_flow_item *ct_item = NULL;
 
 	if (attr->group != 0)
 		PMD_DRV_LOG(INFO, "Pretend we support group attribute.");
@@ -3758,8 +3761,19 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 	if (attr->transfer != 0)
 		PMD_DRV_LOG(INFO, "Pretend we support transfer attribute.");
 
+	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_CONNTRACK) {
+			ct_item = item;
+			break;
+		}
+	}
+
 	cookie = rte_rand();
 
+	if (ct_item != NULL)
+		return nfp_ct_flow_setup(representor, items, actions,
+				ct_item, validate_flag, cookie);
+
 	return nfp_flow_process(representor, items, actions, validate_flag, cookie, true);
 }
 
@@ -4235,6 +4249,23 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
 	};
 
+	struct rte_hash_parameters ct_zone_hash_params = {
+		.name       = "ct_zone_table",
+		.entries    = 65536,
+		.hash_func  = rte_jhash,
+		.socket_id  = rte_socket_id(),
+		.key_len    = sizeof(uint32_t),
+		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+	};
+
+	struct rte_hash_parameters ct_map_hash_params = {
+		.name       = "ct_map_table",
+		.hash_func  = rte_jhash,
+		.socket_id  = rte_socket_id(),
+		.key_len    = sizeof(uint32_t),
+		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+	};
+
 	ctx_count = nfp_rtsym_read_le(pf_dev->sym_tbl,
 			"CONFIG_FC_HOST_CTX_COUNT", &ret);
 	if (ret < 0) {
@@ -4325,6 +4356,25 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 		goto free_flow_table;
 	}
 
+	/* ct zone table */
+	ct_zone_hash_params.hash_func_init_val = priv->hash_seed;
+	priv->ct_zone_table = rte_hash_create(&ct_zone_hash_params);
+	if (priv->ct_zone_table == NULL) {
+		PMD_INIT_LOG(ERR, "ct zone table creation failed");
+		ret = -ENOMEM;
+		goto free_pre_tnl_table;
+	}
+
+	/* ct map table */
+	ct_map_hash_params.hash_func_init_val = priv->hash_seed;
+	ct_map_hash_params.entries = ctx_count;
+	priv->ct_map_table = rte_hash_create(&ct_map_hash_params);
+	if (priv->ct_map_table == NULL) {
+		PMD_INIT_LOG(ERR, "ct map table creation failed");
+		ret = -ENOMEM;
+		goto free_ct_zone_table;
+	}
+
 	/* ipv4 off list */
 	rte_spinlock_init(&priv->ipv4_off_lock);
 	LIST_INIT(&priv->ipv4_off_list);
@@ -4338,6 +4388,10 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 
 	return 0;
 
+free_ct_zone_table:
+	rte_hash_free(priv->ct_zone_table);
+free_pre_tnl_table:
+	rte_hash_free(priv->pre_tun_table);
 free_flow_table:
 	rte_hash_free(priv->flow_table);
 free_mask_table:
@@ -4363,6 +4417,8 @@ nfp_flow_priv_uninit(struct nfp_pf_dev *pf_dev)
 	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
 	priv = app_fw_flower->flow_priv;
 
+	rte_hash_free(priv->ct_map_table);
+	rte_hash_free(priv->ct_zone_table);
 	rte_hash_free(priv->pre_tun_table);
 	rte_hash_free(priv->flow_table);
 	rte_hash_free(priv->mask_table);
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index 817eaecba2..df16cab8b5 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -150,6 +150,10 @@ struct nfp_flow_priv {
 	rte_spinlock_t ipv6_off_lock; /**< Lock the ipv6 off list */
 	/* neighbor next */
 	LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */
+	/* Conntrack */
+	struct rte_hash *ct_zone_table; /**< Hash table to store ct zone entry */
+	struct nfp_ct_zone_entry *ct_zone_wc; /**< The wildcard ct zone entry */
+	struct rte_hash *ct_map_table; /**< Hash table to store ct map entry */
 };
 
 struct rte_flow {
-- 
2.39.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 3/4] net/nfp: add call to add and delete the flows to firmware
  2023-09-30 10:00 [PATCH 0/4] support offload of simple conntrack flow rules Chaoyong He
  2023-09-30 10:00 ` [PATCH 1/4] net/nfp: prepare for the flow merge Chaoyong He
  2023-09-30 10:00 ` [PATCH 2/4] net/nfp: add infrastructure for ct " Chaoyong He
@ 2023-09-30 10:00 ` Chaoyong He
  2023-09-30 10:00 ` [PATCH 4/4] net/nfp: add support for merged flows and conntrack stats Chaoyong He
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 12+ messages in thread
From: Chaoyong He @ 2023-09-30 10:00 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

Add the offload calls that add flows to and delete flows from the firmware.
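
As a condensed sketch, the add path introduced below boils down to the
following calls (all names come from this patch; error handling and
teardown elided):

	/* Compile the merged rule, offload it, then track it locally. */
	nfp_flow = nfp_flow_process(repr, items, actions, false, cookie, true);
	ret = nfp_flower_cmsg_flow_add(repr->app_fw_flower, nfp_flow);
	if (ret == 0)
		ret = nfp_flow_table_add(repr->app_fw_flower->flow_priv, nfp_flow);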

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/flower/nfp_conntrack.c | 112 ++++++++++++++++++++++++-
 drivers/net/nfp/flower/nfp_conntrack.h |   5 ++
 drivers/net/nfp/nfp_flow.c             |   8 ++
 3 files changed, 122 insertions(+), 3 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
index 24762de133..d81e2970fb 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.c
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -9,8 +9,8 @@
 #include <rte_hash.h>
 #include <rte_jhash.h>
 
-#include "../nfp_flow.h"
 #include "../nfp_logs.h"
+#include "nfp_flower_cmsg.h"
 #include "nfp_flower_representor.h"
 
 struct ct_data {
@@ -59,6 +59,7 @@ struct nfp_ct_merge_entry {
 	LIST_ENTRY(nfp_ct_merge_entry) pre_ct_list;
 	LIST_ENTRY(nfp_ct_merge_entry) post_ct_list;
 	struct nfp_initial_flow rule;
+	struct rte_flow *compiled_rule;
 	struct nfp_ct_zone_entry *ze;
 	struct nfp_ct_flow_entry *pre_ct_parent;
 	struct nfp_ct_flow_entry *post_ct_parent;
@@ -984,6 +985,102 @@ nfp_ct_zone_entry_free(struct nfp_ct_zone_entry *ze,
 	}
 }
 
+static int
+nfp_ct_offload_add(struct nfp_flower_representor *repr,
+		struct nfp_ct_merge_entry *merge_entry)
+{
+	int ret;
+	uint64_t cookie;
+	struct rte_flow *nfp_flow;
+	struct nfp_flow_priv *priv;
+	const struct rte_flow_item *items;
+	const struct rte_flow_action *actions;
+
+	cookie = rte_rand();
+	items = merge_entry->rule.items;
+	actions = merge_entry->rule.actions;
+	nfp_flow = nfp_flow_process(repr, items, actions, false, cookie, true);
+	if (nfp_flow == NULL) {
+		PMD_DRV_LOG(ERR, "Process the merged flow rule failed.");
+		return -EINVAL;
+	}
+
+	/* Add the flow to hardware */
+	priv = repr->app_fw_flower->flow_priv;
+	ret = nfp_flower_cmsg_flow_add(repr->app_fw_flower, nfp_flow);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add the merged flow to firmware failed.");
+		goto flow_teardown;
+	}
+
+	/* Add the flow to flow hash table */
+	ret = nfp_flow_table_add(priv, nfp_flow);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add the merged flow to flow table failed.");
+		goto flow_teardown;
+	}
+
+	merge_entry->compiled_rule = nfp_flow;
+
+	return 0;
+
+flow_teardown:
+	nfp_flow_teardown(priv, nfp_flow, false);
+	nfp_flow_free(nfp_flow);
+
+	return ret;
+}
+
+int
+nfp_ct_offload_del(struct rte_eth_dev *dev,
+		struct nfp_ct_map_entry *me,
+		struct rte_flow_error *error)
+{
+	int ret;
+	struct nfp_ct_flow_entry *fe;
+	struct nfp_ct_merge_entry *m_ent;
+
+	fe = me->fe;
+
+	if (fe->type == CT_TYPE_PRE_CT) {
+		LIST_FOREACH(m_ent, &fe->children, pre_ct_list) {
+			if (m_ent->compiled_rule != NULL) {
+				ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error);
+				if (ret != 0) {
+					PMD_DRV_LOG(ERR, "Destroy the merged flow failed");
+					return -EINVAL;
+				}
+				m_ent->compiled_rule = NULL;
+			}
+
+			m_ent->pre_ct_parent = NULL;
+			LIST_REMOVE(m_ent, pre_ct_list);
+			if (m_ent->post_ct_parent == NULL)
+				nfp_ct_merge_entry_destroy(m_ent);
+		}
+	} else {
+		LIST_FOREACH(m_ent, &fe->children, post_ct_list) {
+			if (m_ent->compiled_rule != NULL) {
+				ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error);
+				if (ret != 0) {
+					PMD_DRV_LOG(ERR, "Destroy the merged flow failed");
+					return -EINVAL;
+				}
+				m_ent->compiled_rule = NULL;
+			}
+
+			m_ent->post_ct_parent = NULL;
+			LIST_REMOVE(m_ent, post_ct_list);
+			if (m_ent->pre_ct_parent == NULL)
+				nfp_ct_merge_entry_destroy(m_ent);
+		}
+	}
+
+	nfp_ct_flow_entry_destroy_partly(fe);
+
+	return 0;
+}
+
 static inline bool
 is_item_check_pass(const struct rte_flow_item *item1,
 		const struct rte_flow_item *item2,
@@ -1411,8 +1508,17 @@ nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze,
 		goto free_actions;
 	}
 
+	/* Send to firmware */
+	ret = nfp_ct_offload_add(pre_ct_entry->repr, merge_entry);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Send the merged flow to firmware failed");
+		goto merge_table_del;
+	}
+
 	return true;
 
+merge_table_del:
+	nfp_ct_merge_table_delete(ze, merge_entry);
 free_actions:
 	rte_free(merge_entry->rule.actions);
 free_items:
@@ -1499,7 +1605,7 @@ nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item,
 		}
 	}
 
-	/* The real offload logic comes in next commit, so here just return false for now */
+	return true;
 
 ct_flow_entry_free:
 	nfp_ct_flow_entry_destroy(fe);
@@ -1568,7 +1674,7 @@ nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item,
 	if (!ret)
 		goto ct_flow_entry_free;
 
-	/* The real offload logic comes in next commit, so here just return false for now */
+	return true;
 
 ct_flow_entry_free:
 	nfp_ct_flow_entry_destroy(fe);
diff --git a/drivers/net/nfp/flower/nfp_conntrack.h b/drivers/net/nfp/flower/nfp_conntrack.h
index 149a3eb040..2f47280716 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.h
+++ b/drivers/net/nfp/flower/nfp_conntrack.h
@@ -8,6 +8,7 @@
 
 #include <stdbool.h>
 
+#include <ethdev_driver.h>
 #include <rte_flow.h>
 
 #include "../nfp_flow.h"
@@ -22,6 +23,10 @@ struct nfp_ct_map_entry *nfp_ct_map_table_search(struct nfp_flow_priv *priv,
 		char *hash_data,
 		uint32_t hash_len);
 
+int nfp_ct_offload_del(struct rte_eth_dev *dev,
+		struct nfp_ct_map_entry *me,
+		struct rte_flow_error *error);
+
 struct rte_flow *nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 		const struct rte_flow_item items[],
 		const struct rte_flow_action actions[],
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 16a5c7e055..a6439679d3 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -3911,8 +3911,10 @@ nfp_flow_destroy(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
 	int ret;
+	uint64_t cookie;
 	struct rte_flow *flow_find;
 	struct nfp_flow_priv *priv;
+	struct nfp_ct_map_entry *me;
 	struct nfp_app_fw_flower *app_fw_flower;
 	struct nfp_flower_representor *representor;
 
@@ -3920,6 +3922,12 @@ nfp_flow_destroy(struct rte_eth_dev *dev,
 	app_fw_flower = representor->app_fw_flower;
 	priv = app_fw_flower->flow_priv;
 
+	/* Find the flow in ct_map_table */
+	cookie = rte_be_to_cpu_64(nfp_flow->payload.meta->host_cookie);
+	me = nfp_ct_map_table_search(priv, (char *)&cookie, sizeof(uint64_t));
+	if (me != NULL)
+		return nfp_ct_offload_del(dev, me, error);
+
 	/* Find the flow in flow hash table */
 	flow_find = nfp_flow_table_search(priv, nfp_flow);
 	if (flow_find == NULL) {
-- 
2.39.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 4/4] net/nfp: add support for merged flows and conntrack stats
  2023-09-30 10:00 [PATCH 0/4] support offload of simple conntrack flow rules Chaoyong He
                   ` (2 preceding siblings ...)
  2023-09-30 10:00 ` [PATCH 3/4] net/nfp: add call to add and delete the flows to firmware Chaoyong He
@ 2023-09-30 10:00 ` Chaoyong He
  2023-10-03 12:46 ` [PATCH 0/4] support offload of simple conntrack flow rules Ferruh Yigit
  2023-10-04  9:35 ` [PATCH v2 " Chaoyong He
  5 siblings, 0 replies; 12+ messages in thread
From: Chaoyong He @ 2023-09-30 10:00 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

Adjust the original logic to make it valid for both normal flows
and merged flows.
Add the logic to update conntrack flow stats.
Add support for the conntrack action.
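
The stats handling can be summarized as follows (a condensed excerpt of
the update logic in this patch; locking elided):

	/* Hardware counters accumulate on the merged flow's stats context;
	 * on query they are drained into both parent flow entries. */
	merge_stats = &priv->stats[m_ent->ctx_id];
	m_ent->pre_ct_parent->stats.pkts   += merge_stats->pkts;
	m_ent->pre_ct_parent->stats.bytes  += merge_stats->bytes;
	m_ent->post_ct_parent->stats.pkts  += merge_stats->pkts;
	m_ent->post_ct_parent->stats.bytes += merge_stats->bytes;
	merge_stats->pkts = 0;
	merge_stats->bytes = 0;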

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/flower/nfp_conntrack.c | 54 ++++++++++++++++--
 drivers/net/nfp/flower/nfp_conntrack.h |  3 +
 drivers/net/nfp/nfp_flow.c             | 79 ++++++++++++++++++++++----
 drivers/net/nfp/nfp_flow.h             |  7 ++-
 4 files changed, 126 insertions(+), 17 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
index d81e2970fb..3df2411ce4 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.c
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -39,6 +39,7 @@ struct nfp_ct_flow_entry {
 	struct nfp_flower_representor *repr;
 	struct nfp_ct_zone_entry *ze;
 	struct nfp_initial_flow rule;
+	struct nfp_fl_stats stats;
 };
 
 struct nfp_ct_map_entry {
@@ -56,6 +57,7 @@ struct nfp_ct_zone_entry {
 
 struct nfp_ct_merge_entry {
 	uint64_t cookie[2];
+	uint32_t ctx_id;
 	LIST_ENTRY(nfp_ct_merge_entry) pre_ct_list;
 	LIST_ENTRY(nfp_ct_merge_entry) post_ct_list;
 	struct nfp_initial_flow rule;
@@ -999,12 +1001,14 @@ nfp_ct_offload_add(struct nfp_flower_representor *repr,
 	cookie = rte_rand();
 	items = merge_entry->rule.items;
 	actions = merge_entry->rule.actions;
-	nfp_flow = nfp_flow_process(repr, items, actions, false, cookie, true);
+	nfp_flow = nfp_flow_process(repr, items, actions, false, cookie, true, true);
 	if (nfp_flow == NULL) {
 		PMD_DRV_LOG(ERR, "Process the merged flow rule failed.");
 		return -EINVAL;
 	}
 
+	merge_entry->ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id);
+
 	/* Add the flow to hardware */
 	priv = repr->app_fw_flower->flow_priv;
 	ret = nfp_flower_cmsg_flow_add(repr->app_fw_flower, nfp_flow);
@@ -1014,7 +1018,7 @@ nfp_ct_offload_add(struct nfp_flower_representor *repr,
 	}
 
 	/* Add the flow to flow hash table */
-	ret = nfp_flow_table_add(priv, nfp_flow);
+	ret = nfp_flow_table_add_merge(priv, nfp_flow);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Add the merged flow to flow table failed.");
 		goto flow_teardown;
@@ -1702,14 +1706,14 @@ nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 
 	if (is_ct_commit_flow(ct)) {
 		return nfp_flow_process(representor, &items[1], actions,
-				validate_flag, cookie, false);
+				validate_flag, cookie, false, false);
 	}
 
 	if (is_post_ct_flow(ct)) {
 		if (nfp_flow_handle_post_ct(ct_item, representor, &items[1],
 				actions, cookie)) {
 			return nfp_flow_process(representor, &items[1], actions,
-					validate_flag, cookie, false);
+					validate_flag, cookie, false, false);
 		}
 
 		PMD_DRV_LOG(ERR, "Handle nfp post ct flow failed.");
@@ -1720,7 +1724,7 @@ nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 		if (nfp_flow_handle_pre_ct(ct_item, representor, &items[1],
 				actions, cookie)) {
 			return nfp_flow_process(representor, &items[1], actions,
-					validate_flag, cookie, false);
+					validate_flag, cookie, false, false);
 		}
 
 		PMD_DRV_LOG(ERR, "Handle nfp pre ct flow failed.");
@@ -1730,3 +1734,43 @@ nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 	PMD_DRV_LOG(ERR, "Unsupported ct flow type.");
 	return NULL;
 }
+
+static inline void
+nfp_ct_flow_stats_update(struct nfp_flow_priv *priv,
+		struct nfp_ct_merge_entry *m_ent)
+{
+	uint32_t ctx_id;
+	struct nfp_fl_stats *merge_stats;
+
+	ctx_id = m_ent->ctx_id;
+	merge_stats = &priv->stats[ctx_id];
+
+	m_ent->pre_ct_parent->stats.bytes  += merge_stats->bytes;
+	m_ent->pre_ct_parent->stats.pkts   += merge_stats->pkts;
+	m_ent->post_ct_parent->stats.bytes += merge_stats->bytes;
+	m_ent->post_ct_parent->stats.pkts  += merge_stats->pkts;
+
+	merge_stats->bytes = 0;
+	merge_stats->pkts = 0;
+}
+
+struct nfp_fl_stats *
+nfp_ct_flow_stats_get(struct nfp_flow_priv *priv,
+		struct nfp_ct_map_entry *me)
+{
+	struct nfp_ct_merge_entry *m_ent;
+
+	rte_spinlock_lock(&priv->stats_lock);
+
+	if (me->fe->type == CT_TYPE_PRE_CT) {
+		LIST_FOREACH(m_ent, &me->fe->children, pre_ct_list)
+			nfp_ct_flow_stats_update(priv, m_ent);
+	} else {
+		LIST_FOREACH(m_ent, &me->fe->children, post_ct_list)
+			nfp_ct_flow_stats_update(priv, m_ent);
+	}
+
+	rte_spinlock_unlock(&priv->stats_lock);
+
+	return &me->fe->stats;
+}
diff --git a/drivers/net/nfp/flower/nfp_conntrack.h b/drivers/net/nfp/flower/nfp_conntrack.h
index 2f47280716..5abab4e984 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.h
+++ b/drivers/net/nfp/flower/nfp_conntrack.h
@@ -34,4 +34,7 @@ struct rte_flow *nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 		bool validate_flag,
 		uint64_t cookie);
 
+struct nfp_fl_stats *nfp_ct_flow_stats_get(struct nfp_flow_priv *priv,
+		struct nfp_ct_map_entry *me);
+
 #endif /* __NFP_CONNTRACK_H__ */
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index a6439679d3..020e31e9de 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -310,14 +310,14 @@ nfp_check_mask_add(struct nfp_flow_priv *priv,
 		ret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id);
 		if (ret != 0)
 			return false;
-
-		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
 	} else {
 		/* mask entry already exist */
 		mask_entry->ref_cnt++;
 		*mask_id = mask_entry->mask_id;
 	}
 
+	*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
+
 	return true;
 }
 
@@ -349,7 +349,7 @@ nfp_check_mask_remove(struct nfp_flow_priv *priv,
 	return true;
 }
 
-int
+static int
 nfp_flow_table_add(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow)
 {
@@ -396,6 +396,48 @@ nfp_flow_table_search(struct nfp_flow_priv *priv,
 	return flow_find;
 }
 
+int
+nfp_flow_table_add_merge(struct nfp_flow_priv *priv,
+		struct rte_flow *nfp_flow)
+{
+	struct rte_flow *flow_find;
+
+	flow_find = nfp_flow_table_search(priv, nfp_flow);
+	if (flow_find != NULL) {
+		if (nfp_flow->merge_flag || flow_find->merge_flag) {
+			flow_find->merge_flag = true;
+			flow_find->ref_cnt++;
+			return 0;
+		}
+
+		PMD_DRV_LOG(ERR, "Add to flow table failed.");
+		return -EINVAL;
+	}
+
+	return nfp_flow_table_add(priv, nfp_flow);
+}
+
+static int
+nfp_flow_table_delete_merge(struct nfp_flow_priv *priv,
+		struct rte_flow *nfp_flow)
+{
+	struct rte_flow *flow_find;
+
+	flow_find = nfp_flow_table_search(priv, nfp_flow);
+	if (flow_find == NULL) {
+		PMD_DRV_LOG(ERR, "Can't delete a non-existing flow.");
+		return -EINVAL;
+	}
+
+	if (nfp_flow->merge_flag || flow_find->merge_flag) {
+		flow_find->ref_cnt--;
+		if (flow_find->ref_cnt > 0)
+			return 0;
+	}
+
+	return nfp_flow_table_delete(priv, nfp_flow);
+}
+
 static struct rte_flow *
 nfp_flow_alloc(struct nfp_fl_key_ls *key_layer, uint32_t port_id)
 {
@@ -1082,6 +1124,9 @@ nfp_flow_key_layers_calculate_actions(const struct rte_flow_action actions[],
 				return -ENOTSUP;
 			}
 			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_CONNTRACK detected");
+			break;
 		default:
 			PMD_DRV_LOG(ERR, "Action type %d not supported.", action->type);
 			return -ENOTSUP;
@@ -3626,6 +3671,9 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
 				return -EINVAL;
 			position += sizeof(struct nfp_fl_act_meter);
 			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_CONNTRACK");
+			break;
 		default:
 			PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
 			return -ENOTSUP;
@@ -3647,7 +3695,8 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 		const struct rte_flow_action actions[],
 		bool validate_flag,
 		uint64_t cookie,
-		bool install_flag)
+		bool install_flag,
+		bool merge_flag)
 {
 	int ret;
 	char *hash_data;
@@ -3684,6 +3733,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 	}
 
 	nfp_flow->install_flag = install_flag;
+	nfp_flow->merge_flag = merge_flag;
 
 	nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx, cookie);
 
@@ -3717,7 +3767,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 
 	/* Find the flow in hash table */
 	flow_find = nfp_flow_table_search(priv, nfp_flow);
-	if (flow_find != NULL) {
+	if (flow_find != NULL && !nfp_flow->merge_flag && !flow_find->merge_flag) {
 		PMD_DRV_LOG(ERR, "This flow is already exist.");
 		if (!nfp_check_mask_remove(priv, mask_data, mask_len,
 				&nfp_flow_meta->flags)) {
@@ -3774,7 +3824,7 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 		return nfp_ct_flow_setup(representor, items, actions,
 				ct_item, validate_flag, cookie);
 
-	return nfp_flow_process(representor, items, actions, validate_flag, cookie, true);
+	return nfp_flow_process(representor, items, actions, validate_flag, cookie, true, false);
 }
 
 int
@@ -3877,7 +3927,7 @@ nfp_flow_create(struct rte_eth_dev *dev,
 	}
 
 	/* Add the flow to flow hash table */
-	ret = nfp_flow_table_add(priv, nfp_flow);
+	ret = nfp_flow_table_add_merge(priv, nfp_flow);
 	if (ret != 0) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				NULL, "Add flow to the flow table failed.");
@@ -3988,7 +4038,7 @@ nfp_flow_destroy(struct rte_eth_dev *dev,
 	}
 
 	/* Delete the flow from flow hash table */
-	ret = nfp_flow_table_delete(priv, nfp_flow);
+	ret = nfp_flow_table_delete_merge(priv, nfp_flow);
 	if (ret != 0) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				NULL, "Delete flow from the flow table failed.");
@@ -4047,10 +4097,12 @@ nfp_flow_stats_get(struct rte_eth_dev *dev,
 		void *data)
 {
 	bool reset;
+	uint64_t cookie;
 	uint32_t ctx_id;
 	struct rte_flow *flow;
 	struct nfp_flow_priv *priv;
 	struct nfp_fl_stats *stats;
+	struct nfp_ct_map_entry *me;
 	struct rte_flow_query_count *query;
 
 	priv = nfp_flow_dev_to_priv(dev);
@@ -4064,8 +4116,15 @@ nfp_flow_stats_get(struct rte_eth_dev *dev,
 	reset = query->reset;
 	memset(query, 0, sizeof(*query));
 
-	ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id);
-	stats = &priv->stats[ctx_id];
+	/* Find the flow in ct_map_table */
+	cookie = rte_be_to_cpu_64(nfp_flow->payload.meta->host_cookie);
+	me = nfp_ct_map_table_search(priv, (char *)&cookie, sizeof(uint64_t));
+	if (me != NULL) {
+		stats = nfp_ct_flow_stats_get(priv, me);
+	} else {
+		ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id);
+		stats = &priv->stats[ctx_id];
+	}
 
 	rte_spinlock_lock(&priv->stats_lock);
 	if (stats->pkts != 0 && stats->bytes != 0) {
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index df16cab8b5..ed06eca371 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -165,7 +165,9 @@ struct rte_flow {
 	uint32_t port_id;
 	bool install_flag;
 	bool tcp_flag;    /**< Used in the SET_TP_* action */
+	bool merge_flag;
 	enum nfp_flow_type type;
+	uint16_t ref_cnt;
 };
 
 /* Forward declaration */
@@ -181,8 +183,9 @@ struct rte_flow *nfp_flow_process(struct nfp_flower_representor *representor,
 		const struct rte_flow_action actions[],
 		bool validate_flag,
 		uint64_t cookie,
-		bool install_flag);
-int nfp_flow_table_add(struct nfp_flow_priv *priv,
+		bool install_flag,
+		bool merge_flag);
+int nfp_flow_table_add_merge(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow);
 int nfp_flow_teardown(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow,
-- 
2.39.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 0/4] support offload of simple conntrack flow rules
  2023-09-30 10:00 [PATCH 0/4] support offload of simple conntrack flow rules Chaoyong He
                   ` (3 preceding siblings ...)
  2023-09-30 10:00 ` [PATCH 4/4] net/nfp: add support for merged flows and conntrack stats Chaoyong He
@ 2023-10-03 12:46 ` Ferruh Yigit
  2023-10-04  9:35 ` [PATCH v2 " Chaoyong He
  5 siblings, 0 replies; 12+ messages in thread
From: Ferruh Yigit @ 2023-10-03 12:46 UTC (permalink / raw)
  To: Chaoyong He, dev; +Cc: oss-drivers

On 9/30/2023 11:00 AM, Chaoyong He wrote:
> This patch series add the support of simple conntrack flow rules offload
> through flower firmware by import the needed data structure and logic of
> flow merge.
> 
> Chaoyong He (4):
>   net/nfp: prepare for the flow merge
>   net/nfp: add infrastructure for ct flow merge
>

'ct' is conntrack, right? Can you use the long version if possible?

>   net/nfp: add call to add and delete the flows to firmware
>   net/nfp: add support for merged flows and conntrack stats
> 
>  


'./devtools/check-doc-vs-code.sh' reports some errors; can you please check?



^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 0/4] support offload of simple conntrack flow rules
  2023-09-30 10:00 [PATCH 0/4] support offload of simple conntrack flow rules Chaoyong He
                   ` (4 preceding siblings ...)
  2023-10-03 12:46 ` [PATCH 0/4] support offload of simple conntrack flow rules Ferruh Yigit
@ 2023-10-04  9:35 ` Chaoyong He
  2023-10-04  9:35   ` [PATCH v2 1/4] net/nfp: prepare for the flow merge Chaoyong He
                     ` (4 more replies)
  5 siblings, 5 replies; 12+ messages in thread
From: Chaoyong He @ 2023-10-04  9:35 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

This patch series adds support for offloading simple conntrack flow
rules through the flower firmware by importing the needed data
structures and flow merge logic.

---
v2:
* Fix one misspelling in a comment.
* Revise the logic and documentation to resolve the
  'devtools/check-doc-vs-code.sh' warning.
* Adjust the commit messages following the reviewer's advice.
---

Chaoyong He (4):
  net/nfp: prepare for the flow merge
  net/nfp: add infrastructure for conntrack flow merge
  net/nfp: add call to add and delete the flows to firmware
  net/nfp: add support for merged flows and conntrack stats

 doc/guides/nics/features/nfp.ini       |    2 +
 drivers/net/nfp/flower/nfp_conntrack.c | 1766 ++++++++++++++++++++++++
 drivers/net/nfp/flower/nfp_conntrack.h |   40 +
 drivers/net/nfp/meson.build            |    1 +
 drivers/net/nfp/nfp_flow.c             |  177 ++-
 drivers/net/nfp/nfp_flow.h             |   38 +
 6 files changed, 1995 insertions(+), 29 deletions(-)
 create mode 100644 drivers/net/nfp/flower/nfp_conntrack.c
 create mode 100644 drivers/net/nfp/flower/nfp_conntrack.h

-- 
2.39.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 1/4] net/nfp: prepare for the flow merge
  2023-10-04  9:35 ` [PATCH v2 " Chaoyong He
@ 2023-10-04  9:35   ` Chaoyong He
  2023-10-04  9:36   ` [PATCH v2 2/4] net/nfp: add infrastructure for conntrack " Chaoyong He
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Chaoyong He @ 2023-10-04  9:35 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

Move data structures and macros from the source file to the header file.
Export the needed functions through the header file.

We add two more parameters to 'nfp_flow_process()' to prepare for the
flow merge.
The 'cookie' is moved into a parameter because the flow merge logic
needs this cookie.
The 'install' parameter is needed because, in flow merge, some flows
do not need to be installed to the hardware.
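
The two call patterns then look like this (a sketch; 'known_cookie' is
a placeholder for a cookie supplied by the flow merge logic in later
patches):

	/* Normal path: generate a fresh cookie and install to hardware. */
	cookie = rte_rand();
	flow = nfp_flow_process(representor, items, actions, validate_flag,
			cookie, true);

	/* Merge path: reuse the caller's cookie and skip the hardware
	 * install for flows that only take part in a merge. */
	flow = nfp_flow_process(representor, items, actions, false,
			known_cookie, false);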

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/nfp_flow.c | 42 +++++++++++++++++---------------------
 drivers/net/nfp/nfp_flow.h | 31 ++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 23 deletions(-)

diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index aa286535f7..1bb93bcfb5 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -89,17 +89,6 @@
 /* Tunnel ports */
 #define NFP_FL_PORT_TYPE_TUN            0x50000000
 
-/*
- * Maximum number of items in struct rte_flow_action_vxlan_encap.
- * ETH / IPv4(6) / UDP / VXLAN / END
- */
-#define ACTION_VXLAN_ENCAP_ITEMS_NUM 5
-
-struct vxlan_data {
-	struct rte_flow_action_vxlan_encap conf;
-	struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
-};
-
 /* Static initializer for a list of subsequent item types */
 #define NEXT_ITEM(...) \
 	((const enum rte_flow_item_type []){ \
@@ -359,7 +348,7 @@ nfp_check_mask_remove(struct nfp_flow_priv *priv,
 	return true;
 }
 
-static int
+int
 nfp_flow_table_add(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow)
 {
@@ -440,7 +429,7 @@ nfp_flow_alloc(struct nfp_fl_key_ls *key_layer, uint32_t port_id)
 	return NULL;
 }
 
-static void
+void
 nfp_flow_free(struct rte_flow *nfp_flow)
 {
 	rte_free(nfp_flow->payload.meta);
@@ -721,7 +710,8 @@ static void
 nfp_flow_compile_metadata(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow,
 		struct nfp_fl_key_ls *key_layer,
-		uint32_t stats_ctx)
+		uint32_t stats_ctx,
+		uint64_t cookie)
 {
 	struct nfp_fl_rule_metadata *nfp_flow_meta;
 	char *mbuf_off_exact;
@@ -737,7 +727,7 @@ nfp_flow_compile_metadata(struct nfp_flow_priv *priv,
 	nfp_flow_meta->act_len      = key_layer->act_size >> NFP_FL_LW_SIZ;
 	nfp_flow_meta->flags        = 0;
 	nfp_flow_meta->host_ctx_id  = rte_cpu_to_be_32(stats_ctx);
-	nfp_flow_meta->host_cookie  = rte_rand();
+	nfp_flow_meta->host_cookie  = rte_cpu_to_be_64(cookie);
 	nfp_flow_meta->flow_version = rte_cpu_to_be_64(priv->flower_version);
 
 	mbuf_off_exact = nfp_flow->payload.unmasked_data;
@@ -1958,7 +1948,7 @@ nfp_flow_is_tun_item(const struct rte_flow_item *item)
 	return false;
 }
 
-static bool
+bool
 nfp_flow_inner_item_get(const struct rte_flow_item items[],
 		const struct rte_flow_item **inner_item)
 {
@@ -3650,11 +3640,13 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
 	return 0;
 }
 
-static struct rte_flow *
+struct rte_flow *
 nfp_flow_process(struct nfp_flower_representor *representor,
 		const struct rte_flow_item items[],
 		const struct rte_flow_action actions[],
-		bool validate_flag)
+		bool validate_flag,
+		uint64_t cookie,
+		bool install_flag)
 {
 	int ret;
 	char *hash_data;
@@ -3690,9 +3682,9 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 		goto free_stats;
 	}
 
-	nfp_flow->install_flag = true;
+	nfp_flow->install_flag = install_flag;
 
-	nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx);
+	nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx, cookie);
 
 	ret = nfp_flow_compile_items(representor, items, nfp_flow);
 	if (ret != 0) {
@@ -3755,6 +3747,8 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 		__rte_unused struct rte_flow_error *error,
 		bool validate_flag)
 {
+	uint64_t cookie;
+
 	if (attr->group != 0)
 		PMD_DRV_LOG(INFO, "Pretend we support group attribute.");
 
@@ -3764,10 +3758,12 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 	if (attr->transfer != 0)
 		PMD_DRV_LOG(INFO, "Pretend we support transfer attribute.");
 
-	return nfp_flow_process(representor, items, actions, validate_flag);
+	cookie = rte_rand();
+
+	return nfp_flow_process(representor, items, actions, validate_flag, cookie, true);
 }
 
-static int
+int
 nfp_flow_teardown(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow,
 		bool validate_flag)
@@ -3895,7 +3891,7 @@ nfp_flow_create(struct rte_eth_dev *dev,
 	return NULL;
 }
 
-static int
+int
 nfp_flow_destroy(struct rte_eth_dev *dev,
 		struct rte_flow *nfp_flow,
 		struct rte_flow_error *error)
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index 7ce7f62453..817eaecba2 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -11,6 +11,17 @@
 /* The firmware expects lengths in units of long words */
 #define NFP_FL_LW_SIZ                   2
 
+/*
+ * Maximum number of items in struct rte_flow_action_vxlan_encap.
+ * ETH / IPv4(6) / UDP / VXLAN / END
+ */
+#define ACTION_VXLAN_ENCAP_ITEMS_NUM 5
+
+struct vxlan_data {
+	struct rte_flow_action_vxlan_encap conf;
+	struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
+};
+
 enum nfp_flower_tun_type {
 	NFP_FL_TUN_NONE   = 0,
 	NFP_FL_TUN_GRE    = 1,
@@ -153,8 +164,28 @@ struct rte_flow {
 	enum nfp_flow_type type;
 };
 
+/* Forward declaration */
+struct nfp_flower_representor;
+
 int nfp_flow_priv_init(struct nfp_pf_dev *pf_dev);
 void nfp_flow_priv_uninit(struct nfp_pf_dev *pf_dev);
 int nfp_net_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
+bool nfp_flow_inner_item_get(const struct rte_flow_item items[],
+		const struct rte_flow_item **inner_item);
+struct rte_flow *nfp_flow_process(struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		bool validate_flag,
+		uint64_t cookie,
+		bool install_flag);
+int nfp_flow_table_add(struct nfp_flow_priv *priv,
+		struct rte_flow *nfp_flow);
+int nfp_flow_teardown(struct nfp_flow_priv *priv,
+		struct rte_flow *nfp_flow,
+		bool validate_flag);
+void nfp_flow_free(struct rte_flow *nfp_flow);
+int nfp_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *nfp_flow,
+		struct rte_flow_error *error);
 
 #endif /* _NFP_FLOW_H_ */
-- 
2.39.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 2/4] net/nfp: add infrastructure for conntrack flow merge
  2023-10-04  9:35 ` [PATCH v2 " Chaoyong He
  2023-10-04  9:35   ` [PATCH v2 1/4] net/nfp: prepare for the flow merge Chaoyong He
@ 2023-10-04  9:36   ` Chaoyong He
  2023-10-04  9:36   ` [PATCH v2 3/4] net/nfp: add call to add and delete the flows to firmware Chaoyong He
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Chaoyong He @ 2023-10-04  9:36 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

Add the logic to merge the items and actions of the pre_ct and
post_ct flows. The result is stored in a field of the merged flow.
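
The core compatibility test for merging two items reduces to comparing
the keys under the intersection of both masks. A minimal standalone
illustration (the byte values are made up):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint8_t key1 = 0xc0, mask1 = 0xf0;  /* rule 1 matches 0xc? */
		uint8_t key2 = 0xc8, mask2 = 0xff;  /* rule 2 matches 0xc8 */
		uint8_t common = mask1 & mask2;     /* bits both rules constrain */

		/*
		 * Equivalent to the per-byte check in is_item_check_pass():
		 * (key1 & common) ^ (key2 & common) == 0.
		 */
		if (((key1 ^ key2) & common) == 0)
			printf("items agree on the shared bits, merge is possible\n");
		else
			printf("items conflict, no merge\n");

		return 0;
	}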

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 doc/guides/nics/features/nfp.ini       |    2 +
 drivers/net/nfp/flower/nfp_conntrack.c | 1616 ++++++++++++++++++++++++
 drivers/net/nfp/flower/nfp_conntrack.h |   32 +
 drivers/net/nfp/meson.build            |    1 +
 drivers/net/nfp/nfp_flow.c             |   56 +
 drivers/net/nfp/nfp_flow.h             |    4 +
 6 files changed, 1711 insertions(+)
 create mode 100644 drivers/net/nfp/flower/nfp_conntrack.c
 create mode 100644 drivers/net/nfp/flower/nfp_conntrack.h

diff --git a/doc/guides/nics/features/nfp.ini b/doc/guides/nics/features/nfp.ini
index 4264943f05..b53af7b60a 100644
--- a/doc/guides/nics/features/nfp.ini
+++ b/doc/guides/nics/features/nfp.ini
@@ -28,6 +28,7 @@ x86-64               = Y
 Usage doc            = Y
 
 [rte_flow items]
+conntrack            = Y
 eth                  = Y
 geneve               = Y
 gre                  = Y
@@ -42,6 +43,7 @@ vlan                 = Y
 vxlan                = Y
 
 [rte_flow actions]
+conntrack            = Y
 count                = Y
 drop                 = Y
 jump                 = Y
diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
new file mode 100644
index 0000000000..4fa6fdff99
--- /dev/null
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -0,0 +1,1616 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include "nfp_conntrack.h"
+
+#include <rte_malloc.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+
+#include "../nfp_flow.h"
+#include "../nfp_logs.h"
+#include "nfp_flower_representor.h"
+
+struct ct_data {
+	uint8_t  ct_state;        /* Connection state. */
+	uint16_t ct_zone;         /* Connection zone. */
+};
+
+enum ct_entry_type {
+	CT_TYPE_PRE_CT,
+	CT_TYPE_POST_CT,
+};
+
+struct nfp_initial_flow {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint8_t items_cnt;
+	uint8_t actions_cnt;
+};
+
+struct nfp_ct_flow_entry {
+	uint64_t cookie;
+	LIST_ENTRY(nfp_ct_flow_entry) pre_ct_list;
+	LIST_ENTRY(nfp_ct_flow_entry) post_ct_list;
+	LIST_HEAD(, nfp_ct_merge_entry) children;
+	enum ct_entry_type type;
+	struct nfp_flower_representor *repr;
+	struct nfp_ct_zone_entry *ze;
+	struct nfp_initial_flow rule;
+};
+
+struct nfp_ct_map_entry {
+	uint64_t cookie;
+	struct nfp_ct_flow_entry *fe;
+};
+
+struct nfp_ct_zone_entry {
+	uint32_t zone;
+	struct nfp_flow_priv *priv;
+	LIST_HEAD(, nfp_ct_flow_entry) pre_ct_list;
+	LIST_HEAD(, nfp_ct_flow_entry) post_ct_list;
+	struct rte_hash *ct_merge_table;
+};
+
+struct nfp_ct_merge_entry {
+	uint64_t cookie[2];
+	LIST_ENTRY(nfp_ct_merge_entry) pre_ct_list;
+	LIST_ENTRY(nfp_ct_merge_entry) post_ct_list;
+	struct nfp_initial_flow rule;
+	struct nfp_ct_zone_entry *ze;
+	struct nfp_ct_flow_entry *pre_ct_parent;
+	struct nfp_ct_flow_entry *post_ct_parent;
+};
+
+/* OVS_KEY_ATTR_CT_STATE flags */
+#define OVS_CS_F_NEW            0x01 /* Beginning of a new connection. */
+#define OVS_CS_F_ESTABLISHED    0x02 /* Part of an existing connection. */
+#define OVS_CS_F_RELATED        0x04 /* Related to an established connection. */
+#define OVS_CS_F_REPLY_DIR      0x08 /* Flow is in the reply direction. */
+#define OVS_CS_F_INVALID        0x10 /* Could not track connection. */
+#define OVS_CS_F_TRACKED        0x20 /* Conntrack has occurred. */
+#define OVS_CS_F_SRC_NAT        0x40 /* Packet's source address/port was mangled by NAT. */
+#define OVS_CS_F_DST_NAT        0x80 /* Packet's destination address/port was mangled by NAT. */
+
+typedef void (*nfp_action_free_fn)(void *field);
+typedef bool (*nfp_action_copy_fn)(const void *src, void *dst);
+
+static bool
+is_pre_ct_flow(const struct ct_data *ct,
+		const struct rte_flow_action *actions)
+{
+	const struct rte_flow_action *action;
+
+	if (ct == NULL)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
+		if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK)
+			return true;
+	}
+
+	return false;
+}
+
+static bool
+is_post_ct_flow(const struct ct_data *ct)
+{
+	if (ct == NULL)
+		return false;
+
+	if ((ct->ct_state & OVS_CS_F_ESTABLISHED) != 0)
+		return true;
+
+	return false;
+}
+
+static bool
+is_ct_commit_flow(const struct ct_data *ct)
+{
+	if (ct == NULL)
+		return false;
+
+	if ((ct->ct_state & OVS_CS_F_NEW) != 0)
+		return true;
+
+	return false;
+}
+
+static struct nfp_ct_merge_entry *
+nfp_ct_merge_table_search(struct nfp_ct_zone_entry *ze,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int index;
+	uint32_t hash_key;
+	struct nfp_ct_merge_entry *m_ent;
+
+	hash_key = rte_jhash(hash_data, hash_len, ze->priv->hash_seed);
+	index = rte_hash_lookup_data(ze->ct_merge_table, &hash_key, (void **)&m_ent);
+	if (index < 0) {
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_merge table");
+		return NULL;
+	}
+
+	return m_ent;
+}
+
+static bool
+nfp_ct_merge_table_add(struct nfp_ct_zone_entry *ze,
+		struct nfp_ct_merge_entry *merge_entry)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(merge_entry, sizeof(uint64_t) * 2, ze->priv->hash_seed);
+	ret = rte_hash_add_key_data(ze->ct_merge_table, &hash_key, merge_entry);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add to ct_merge table failed");
+		return false;
+	}
+
+	return true;
+}
+
+static void
+nfp_ct_merge_table_delete(struct nfp_ct_zone_entry *ze,
+		struct nfp_ct_merge_entry *m_ent)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(m_ent, sizeof(uint64_t) * 2, ze->priv->hash_seed);
+	ret = rte_hash_del_key(ze->ct_merge_table, &hash_key);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "Delete from ct_merge table failed, ret=%d", ret);
+}
+
+static void
+nfp_ct_merge_entry_destroy(struct nfp_ct_merge_entry *m_ent)
+{
+	struct nfp_ct_zone_entry *ze;
+
+	ze = m_ent->ze;
+	nfp_ct_merge_table_delete(ze, m_ent);
+
+	rte_free(m_ent->rule.actions);
+	rte_free(m_ent->rule.items);
+	LIST_REMOVE(m_ent, pre_ct_list);
+	LIST_REMOVE(m_ent, post_ct_list);
+	rte_free(m_ent);
+}
+
+struct nfp_ct_map_entry *
+nfp_ct_map_table_search(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int index;
+	uint32_t hash_key;
+	struct nfp_ct_map_entry *me;
+
+	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+	index = rte_hash_lookup_data(priv->ct_map_table, &hash_key, (void **)&me);
+	if (index < 0) {
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_map table");
+		return NULL;
+	}
+
+	return me;
+}
+
+static bool
+nfp_ct_map_table_add(struct nfp_flow_priv *priv,
+		struct nfp_ct_map_entry *me)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed);
+	ret = rte_hash_add_key_data(priv->ct_map_table, &hash_key, me);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add to ct_map table failed");
+		return false;
+	}
+
+	return true;
+}
+
+static void
+nfp_ct_map_table_delete(struct nfp_flow_priv *priv,
+		struct nfp_ct_map_entry *me)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(me, sizeof(uint64_t), priv->hash_seed);
+	ret = rte_hash_del_key(priv->ct_map_table, &hash_key);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "Delete from ct_map table failed");
+}
+
+static void
+nfp_ct_map_entry_destroy(struct nfp_ct_map_entry *me)
+{
+	rte_free(me);
+}
+
+static void
+nfp_ct_flow_item_free_real(void *field,
+		enum rte_flow_item_type type)
+{
+	switch (type) {
+	case RTE_FLOW_ITEM_TYPE_VOID:
+		break;
+	case RTE_FLOW_ITEM_TYPE_ETH:        /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_VLAN:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_IPV4:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_IPV6:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_TCP:        /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_UDP:        /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_SCTP:       /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_VXLAN:      /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_GRE:        /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_GRE_KEY:    /* FALLTHROUGH */
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		rte_free(field);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+nfp_ct_flow_item_free(struct rte_flow_item *item)
+{
+	if (item->spec != NULL)
+		nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->spec, item->type);
+
+	if (item->mask != NULL)
+		nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->mask, item->type);
+
+	if (item->last != NULL)
+		nfp_ct_flow_item_free_real((void *)(ptrdiff_t)item->last, item->type);
+}
+
+static void
+nfp_ct_flow_items_free(struct rte_flow_item *items,
+		uint8_t item_cnt)
+{
+	uint8_t loop;
+
+	for (loop = 0; loop < item_cnt; ++loop)
+		nfp_ct_flow_item_free(items + loop);
+}
+
+static bool
+nfp_flow_item_conf_size_get(enum rte_flow_item_type type,
+		size_t *size)
+{
+	size_t len = 0;
+
+	switch (type) {
+	case RTE_FLOW_ITEM_TYPE_VOID:
+		break;
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		len = sizeof(struct rte_flow_item_eth);
+		break;
+	case RTE_FLOW_ITEM_TYPE_VLAN:
+		len = sizeof(struct rte_flow_item_vlan);
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		len = sizeof(struct rte_flow_item_ipv4);
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		len = sizeof(struct rte_flow_item_ipv6);
+		break;
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		len = sizeof(struct rte_flow_item_tcp);
+		break;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		len = sizeof(struct rte_flow_item_udp);
+		break;
+	case RTE_FLOW_ITEM_TYPE_SCTP:
+		len = sizeof(struct rte_flow_item_sctp);
+		break;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		len = sizeof(struct rte_flow_item_vxlan);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GRE:
+		len = sizeof(struct rte_flow_item_gre);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+		len = sizeof(rte_be32_t);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		len = sizeof(struct rte_flow_item_geneve);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported item type: %d", type);
+		return false;
+	}
+
+	*size = len;
+
+	return true;
+}
+
+static void *
+nfp_ct_flow_item_copy_real(const void *src,
+		enum rte_flow_item_type type)
+{
+	bool ret;
+	void *dst;
+	size_t len;
+
+	ret = nfp_flow_item_conf_size_get(type, &len);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Get flow item conf size failed");
+		return NULL;
+	}
+
+	dst = rte_zmalloc("flow_item", len, 0);
+	if (dst == NULL) {
+		PMD_DRV_LOG(ERR, "Malloc memory for ct item failed");
+		return NULL;
+	}
+
+	rte_memcpy(dst, src, len);
+
+	return dst;
+}
+
+static bool
+nfp_ct_flow_item_copy(const struct rte_flow_item *src,
+		struct rte_flow_item *dst)
+{
+	dst->type = src->type;
+
+	if (src->spec != NULL) {
+		dst->spec = nfp_ct_flow_item_copy_real(src->spec, src->type);
+		if (dst->spec == NULL) {
+			PMD_DRV_LOG(ERR, "Copy spec of ct item failed");
+			goto end;
+		}
+	}
+
+	if (src->mask != NULL) {
+		dst->mask = nfp_ct_flow_item_copy_real(src->mask, src->type);
+		if (dst->mask == NULL) {
+			PMD_DRV_LOG(ERR, "Copy mask of ct item failed");
+			goto free_spec;
+		}
+	}
+
+	if (src->last != NULL) {
+		dst->last = nfp_ct_flow_item_copy_real(src->last, src->type);
+		if (dst->last == NULL) {
+			PMD_DRV_LOG(ERR, "Copy last of ct item failed");
+			goto free_mask;
+		}
+	}
+
+	return true;
+
+free_mask:
+	nfp_ct_flow_item_free_real((void *)(ptrdiff_t)dst->mask, dst->type);
+free_spec:
+	nfp_ct_flow_item_free_real((void *)(ptrdiff_t)dst->spec, dst->type);
+end:
+	return false;
+}
+
+static bool
+nfp_ct_flow_items_copy(const struct rte_flow_item *src,
+		struct rte_flow_item *dst,
+		uint8_t item_cnt)
+{
+	bool ret;
+	uint8_t loop;
+
+	for (loop = 0; loop < item_cnt; ++loop) {
+		ret = nfp_ct_flow_item_copy(src + loop, dst + loop);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "Copy ct item failed");
+			nfp_ct_flow_items_free(dst, loop);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void
+nfp_ct_flow_action_free_real(void *field,
+		nfp_action_free_fn func)
+{
+	if (func != NULL)
+		func(field);
+
+	rte_free(field);
+}
+
+static void
+nfp_ct_flow_action_free_vxlan(void *field)
+{
+	struct vxlan_data *vxlan = field;
+
+	nfp_ct_flow_items_free(vxlan->items, ACTION_VXLAN_ENCAP_ITEMS_NUM);
+}
+
+static void
+nfp_ct_flow_action_free_raw(void *field)
+{
+	struct rte_flow_action_raw_encap *raw_encap = field;
+
+	rte_free(raw_encap->data);
+}
+
+static void
+nfp_ct_flow_action_free(struct rte_flow_action *action)
+{
+	nfp_action_free_fn func = NULL;
+
+	if (action->conf == NULL)
+		return;
+
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_VOID:           /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_DROP:           /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_COUNT:          /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_JUMP:           /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+		return;
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:        /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TTL:        /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:     /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+		break;
+	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		func = nfp_ct_flow_action_free_vxlan;
+		break;
+	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+		func = nfp_ct_flow_action_free_raw;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
+		break;
+	}
+
+	nfp_ct_flow_action_free_real((void *)(ptrdiff_t)action->conf, func);
+}
+
+static void
+nfp_ct_flow_actions_free(struct rte_flow_action *actions,
+		uint8_t action_cnt)
+{
+	uint8_t loop;
+
+	for (loop = 0; loop < action_cnt; ++loop)
+		nfp_ct_flow_action_free(actions + loop);
+}
+
+static void *
+nfp_ct_flow_action_copy_real(const void *src,
+		size_t len,
+		nfp_action_copy_fn func)
+{
+	bool ret;
+	void *dst;
+
+	dst = rte_zmalloc("flow_action", len, 0);
+	if (dst == NULL) {
+		PMD_DRV_LOG(ERR, "Malloc memory for ct action failed");
+		return NULL;
+	}
+
+	if (func != NULL) {
+		ret = func(src, dst);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "Copy ct action failed");
+			return NULL;
+		}
+
+		return dst;
+	}
+
+	rte_memcpy(dst, src, len);
+
+	return dst;
+}
+
+static bool
+nfp_ct_flow_action_copy_vxlan(const void *src,
+		void *dst)
+{
+	struct vxlan_data *vxlan_dst = dst;
+	const struct vxlan_data *vxlan_src = src;
+
+	vxlan_dst->conf.definition = vxlan_dst->items;
+	return nfp_ct_flow_items_copy(vxlan_src->items, vxlan_dst->items,
+			ACTION_VXLAN_ENCAP_ITEMS_NUM);
+}
+
+static bool
+nfp_ct_flow_action_copy_raw(const void *src,
+		void *dst)
+{
+	struct rte_flow_action_raw_encap *raw_dst = dst;
+	const struct rte_flow_action_raw_encap *raw_src = src;
+
+	raw_dst->size = raw_src->size;
+	raw_dst->data = nfp_ct_flow_action_copy_real(raw_src->data,
+			raw_src->size, NULL);
+	if (raw_dst->data == NULL) {
+		PMD_DRV_LOG(ERR, "Copy ct action process failed");
+		return false;
+	}
+
+	return true;
+}
+
+static bool
+nfp_ct_flow_action_copy(const struct rte_flow_action *src,
+		struct rte_flow_action *dst)
+{
+	size_t len;
+	nfp_action_copy_fn func = NULL;
+
+	dst->type = src->type;
+
+	if (src->conf == NULL)
+		return true;
+
+	switch (src->type) {
+	case RTE_FLOW_ACTION_TYPE_VOID:         /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_DROP:         /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_COUNT:        /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_JUMP:         /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+		return true;
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+		len = sizeof(struct rte_flow_action_set_mac);
+		break;
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:
+		len = sizeof(struct rte_flow_action_port_id);
+		break;
+	case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+		len = sizeof(struct rte_flow_action_of_push_vlan);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+		len = sizeof(struct rte_flow_action_set_ipv4);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+		len = sizeof(struct rte_flow_action_set_dscp);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+		len = sizeof(struct rte_flow_action_set_ipv6);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_TTL:
+		len = sizeof(struct rte_flow_action_set_ttl);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:  /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+		len = sizeof(struct rte_flow_action_set_tp);
+		break;
+	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		len = sizeof(struct vxlan_data);
+		func = nfp_ct_flow_action_copy_vxlan;
+		break;
+	case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+		len = sizeof(struct rte_flow_action_raw_encap);
+		func = nfp_ct_flow_action_copy_raw;
+		break;
+	default:
+		PMD_DRV_LOG(DEBUG, "Unsupported action type: %d", src->type);
+		return false;
+	}
+
+	dst->conf = nfp_ct_flow_action_copy_real(src->conf, len, func);
+	if (dst->conf == NULL) {
+		PMD_DRV_LOG(DEBUG, "Copy ct action process failed");
+		return false;
+	}
+
+	return true;
+}
+
+static bool
+nfp_ct_flow_actions_copy(const struct rte_flow_action *src,
+		struct rte_flow_action *dst,
+		uint8_t action_cnt)
+{
+	bool ret;
+	uint8_t loop;
+
+	for (loop = 0; loop < action_cnt; ++loop) {
+		ret = nfp_ct_flow_action_copy(src + loop, dst + loop);
+		if (!ret) {
+			PMD_DRV_LOG(DEBUG, "Copy ct action failed");
+			nfp_ct_flow_actions_free(dst, loop);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static struct nfp_ct_flow_entry *
+nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze,
+		struct nfp_flower_representor *repr,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		uint64_t cookie)
+{
+	bool ret;
+	uint8_t loop;
+	uint8_t item_cnt = 1;      /* the RTE_FLOW_ITEM_TYPE_END */
+	uint8_t action_cnt = 1;    /* the RTE_FLOW_ACTION_TYPE_END */
+	struct nfp_flow_priv *priv;
+	struct nfp_ct_map_entry *me;
+	struct nfp_ct_flow_entry *fe;
+
+	fe = rte_zmalloc("ct_flow_entry", sizeof(*fe), 0);
+	if (fe == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc ct_flow entry");
+		return NULL;
+	}
+
+	fe->ze = ze;
+	fe->repr = repr;
+	fe->cookie = cookie;
+	LIST_INIT(&fe->children);
+
+	for (loop = 0; (items + loop)->type != RTE_FLOW_ITEM_TYPE_END; loop++)
+		item_cnt++;
+	for (loop = 0; (actions + loop)->type != RTE_FLOW_ACTION_TYPE_END; loop++)
+		action_cnt++;
+
+	fe->rule.items = rte_zmalloc("ct_flow_item",
+			sizeof(struct rte_flow_item) * item_cnt, 0);
+	if (fe->rule.items == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc ct flow items");
+		goto free_flow_entry;
+	}
+
+	fe->rule.actions = rte_zmalloc("ct_flow_action",
+			sizeof(struct rte_flow_action) * action_cnt, 0);
+	if (fe->rule.actions == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc ct flow actions");
+		goto free_flow_item;
+	}
+
+	/* Deep copy of items */
+	ret = nfp_ct_flow_items_copy(items, fe->rule.items, item_cnt);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Could not deep copy ct flow items");
+		goto free_flow_action;
+	}
+
+	/* Deep copy of actions */
+	ret = nfp_ct_flow_actions_copy(actions, fe->rule.actions, action_cnt);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Could not deep copy ct flow actions");
+		goto free_copied_items;
+	}
+
+	fe->rule.items_cnt = item_cnt;
+	fe->rule.actions_cnt = action_cnt;
+
+	/* Now add a ct map entry */
+	me = rte_zmalloc("ct_map_entry", sizeof(*me), 0);
+	if (me == NULL) {
+		PMD_DRV_LOG(ERR, "Malloc memory for ct map entry failed");
+		goto free_copied_actions;
+	}
+
+	me->cookie = fe->cookie;
+	me->fe = fe;
+
+	priv = repr->app_fw_flower->flow_priv;
+	ret = nfp_ct_map_table_add(priv, me);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Add into ct map table failed");
+		goto free_map_entry;
+	}
+
+	return fe;
+
+free_map_entry:
+	nfp_ct_map_entry_destroy(me);
+free_copied_actions:
+	nfp_ct_flow_actions_free(fe->rule.actions, action_cnt);
+free_copied_items:
+	nfp_ct_flow_items_free(fe->rule.items, item_cnt);
+free_flow_action:
+	rte_free(fe->rule.actions);
+free_flow_item:
+	rte_free(fe->rule.items);
+free_flow_entry:
+	rte_free(fe);
+
+	return NULL;
+}
+
+static void
+nfp_flow_children_merge_free(struct nfp_ct_flow_entry *fe)
+{
+	struct nfp_ct_merge_entry *m_ent;
+
+	switch (fe->type) {
+	case CT_TYPE_PRE_CT:
+		LIST_FOREACH(m_ent, &fe->children, pre_ct_list)
+			nfp_ct_merge_entry_destroy(m_ent);
+		break;
+	case CT_TYPE_POST_CT:
+		LIST_FOREACH(m_ent, &fe->children, post_ct_list)
+			nfp_ct_merge_entry_destroy(m_ent);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+nfp_ct_flow_entry_destroy_partly(struct nfp_ct_flow_entry *fe)
+{
+	struct nfp_ct_map_entry *me;
+
+	if (!LIST_EMPTY(&fe->children))
+		nfp_flow_children_merge_free(fe);
+
+	me = nfp_ct_map_table_search(fe->ze->priv, (char *)&fe->cookie, sizeof(uint64_t));
+	if (me != NULL) {
+		nfp_ct_map_table_delete(fe->ze->priv, me);
+		nfp_ct_map_entry_destroy(me);
+	}
+
+	nfp_ct_flow_actions_free(fe->rule.actions, fe->rule.actions_cnt);
+	nfp_ct_flow_items_free(fe->rule.items, fe->rule.items_cnt);
+	rte_free(fe->rule.actions);
+	rte_free(fe->rule.items);
+	rte_free(fe);
+}
+
+static void
+nfp_ct_flow_entry_destroy(struct nfp_ct_flow_entry *fe)
+{
+	LIST_REMOVE(fe, pre_ct_list);
+	LIST_REMOVE(fe, post_ct_list);
+
+	nfp_ct_flow_entry_destroy_partly(fe);
+}
+
+static struct nfp_ct_zone_entry *
+nfp_ct_zone_table_search(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len)
+{
+	int index;
+	uint32_t hash_key;
+	struct nfp_ct_zone_entry *ze;
+
+	hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+	index = rte_hash_lookup_data(priv->ct_zone_table, &hash_key, (void **)&ze);
+	if (index < 0) {
+		PMD_DRV_LOG(DEBUG, "Data NOT found in the ct_zone table");
+		return NULL;
+	}
+
+	return ze;
+}
+
+static bool
+nfp_ct_zone_table_add(struct nfp_flow_priv *priv,
+		struct nfp_ct_zone_entry *ze)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed);
+	ret = rte_hash_add_key_data(priv->ct_zone_table, &hash_key, ze);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add to the ct_zone table failed");
+		return false;
+	}
+
+	return true;
+}
+
+static void
+nfp_ct_zone_table_delete(struct nfp_flow_priv *priv,
+		struct nfp_ct_zone_entry *ze)
+{
+	int ret;
+	uint32_t hash_key;
+
+	hash_key = rte_jhash(ze, sizeof(uint32_t), priv->hash_seed);
+	ret = rte_hash_del_key(priv->ct_zone_table, &hash_key);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "Delete from the ct_zone table failed");
+}
+
+static bool
+nfp_ct_zone_entry_init(struct nfp_ct_zone_entry *ze,
+		struct nfp_flow_priv *priv,
+		uint32_t zone,
+		bool wildcard)
+{
+	char hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters ct_merge_hash_params = {
+		.entries    = 1000,
+		.hash_func  = rte_jhash,
+		.socket_id  = rte_socket_id(),
+		.key_len    = sizeof(uint32_t),
+		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+	};
+
+	if (wildcard) {
+		ct_merge_hash_params.name = "ct_wc_merge_table";
+	} else {
+		snprintf(hash_name, sizeof(hash_name), "ct_%u_merge_table", zone);
+		ct_merge_hash_params.name = hash_name;
+	}
+
+	ct_merge_hash_params.hash_func_init_val = priv->hash_seed;
+	ze->ct_merge_table = rte_hash_create(&ct_merge_hash_params);
+	if (ze->ct_merge_table == NULL) {
+		PMD_DRV_LOG(ERR, "ct merge table creation failed");
+		return false;
+	}
+
+	ze->zone = zone;
+	ze->priv = priv;
+	LIST_INIT(&ze->pre_ct_list);
+	LIST_INIT(&ze->post_ct_list);
+
+	return true;
+}
+
+static void
+nfp_ct_zone_entry_destroy(struct nfp_ct_zone_entry *ze)
+{
+	struct nfp_ct_flow_entry *fe;
+
+	if (ze == NULL)
+		return;
+
+	rte_hash_free(ze->ct_merge_table);
+
+	LIST_FOREACH(fe, &ze->pre_ct_list, pre_ct_list)
+		nfp_ct_flow_entry_destroy(fe);
+
+	LIST_FOREACH(fe, &ze->post_ct_list, post_ct_list)
+		nfp_ct_flow_entry_destroy(fe);
+
+	rte_free(ze);
+}
+
+static struct nfp_ct_zone_entry *
+nfp_ct_zone_entry_get(struct nfp_flow_priv *priv,
+		uint32_t zone,
+		bool wildcard)
+{
+	bool is_ok;
+	struct nfp_ct_zone_entry *ze;
+
+	if (wildcard) {
+		if (priv->ct_zone_wc != NULL)
+			return priv->ct_zone_wc;
+
+		ze = rte_zmalloc("ct_zone_wc", sizeof(*ze), 0);
+		if (ze == NULL) {
+			PMD_DRV_LOG(ERR, "Could not alloc ct_zone_wc entry");
+			return NULL;
+		}
+
+		is_ok = nfp_ct_zone_entry_init(ze, priv, zone, true);
+		if (!is_ok) {
+			PMD_DRV_LOG(ERR, "Init ct zone wc entry failed");
+			goto free_ct_zone_entry;
+		}
+
+		priv->ct_zone_wc = ze;
+	} else {
+		ze = nfp_ct_zone_table_search(priv, (char *)&zone, sizeof(uint32_t));
+		if (ze != NULL)
+			return ze;
+
+		ze = rte_zmalloc("ct_zone_entry", sizeof(*ze), 0);
+		if (ze == NULL) {
+			PMD_DRV_LOG(ERR, "Could not alloc ct_zone entry");
+			return NULL;
+		}
+
+		is_ok = nfp_ct_zone_entry_init(ze, priv, zone, false);
+		if (!is_ok) {
+			PMD_DRV_LOG(ERR, "Init ct zone entry failed");
+			goto free_ct_zone_entry;
+		}
+
+		is_ok = nfp_ct_zone_table_add(priv, ze);
+		if (!is_ok) {
+			PMD_DRV_LOG(ERR, "Add into ct zone table failed");
+			goto free_ct_zone_entry;
+		}
+	}
+
+	return ze;
+
+free_ct_zone_entry:
+	nfp_ct_zone_entry_destroy(ze);
+
+	return NULL;
+}
+
+static void
+nfp_ct_zone_entry_free(struct nfp_ct_zone_entry *ze,
+		bool wildcard)
+{
+	if (LIST_EMPTY(&ze->pre_ct_list) && LIST_EMPTY(&ze->post_ct_list)) {
+		if (!wildcard)
+			nfp_ct_zone_table_delete(ze->priv, ze);
+
+		nfp_ct_zone_entry_destroy(ze);
+	}
+}
+
+static inline bool
+is_item_check_pass(const struct rte_flow_item *item1,
+		const struct rte_flow_item *item2,
+		uint8_t *cnt_same)
+{
+	bool pass;
+	uint32_t i;
+	size_t size;
+	const char *key1 = item1->spec;
+	const char *key2 = item2->spec;
+	const char *mask1 = item1->mask;
+	const char *mask2 = item2->mask;
+
+	if (item1->type != item2->type)
+		return true;
+
+	pass = nfp_flow_item_conf_size_get(item1->type, &size);
+	if (!pass)
+		return false;
+
+	for (i = 0; i < size; i++) {
+		if ((key1[i] & mask1[i] & mask2[i]) ^ (key2[i] & mask1[i] & mask2[i]))
+			return false;
+	}
+
+	*cnt_same = *cnt_same + 1;
+
+	return true;
+}
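
The byte loop above is the whole compatibility rule: two items of the same
type can merge only if their specs agree on every bit that both masks care
about, while items of different types never conflict and are simply both
kept. A self-contained illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Rule 1 matches the high nibble 0xC, rule 2 the full byte 0xC5. */
	uint8_t key1 = 0xC0, mask1 = 0xF0;
	uint8_t key2 = 0xC5, mask2 = 0xFF;
	uint8_t both = mask1 & mask2;	/* Bits both rules constrain: 0xF0. */

	if (((key1 & both) ^ (key2 & both)) == 0)
		printf("compatible: merged match is 0xC5/0xFF\n");
	else
		printf("conflict: the rules disagree on a shared bit\n");

	return 0;
}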
+
+static bool
+nfp_ct_merge_items_check(struct rte_flow_item *items1,
+		struct rte_flow_item *items2,
+		uint8_t *cnt_same)
+{
+	bool pass;
+	bool is_tun_flow_1;
+	bool is_tun_flow_2;
+	const struct rte_flow_item *item1;
+	const struct rte_flow_item *item2;
+	const struct rte_flow_item *inner_item1 = NULL;
+	const struct rte_flow_item *inner_item2 = NULL;
+
+	is_tun_flow_1 = nfp_flow_inner_item_get(items1, &inner_item1);
+	is_tun_flow_2 = nfp_flow_inner_item_get(items2, &inner_item2);
+
+	if (is_tun_flow_1) {
+		if (is_tun_flow_2) {
+			/* Outer layer */
+			for (item1 = items1; item1 != inner_item1; item1++) {
+				for (item2 = items2; item2 != inner_item2; item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+			/* Inner layer */
+			for (item1 = inner_item1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) {
+				for (item2 = inner_item2; item2->type != RTE_FLOW_ITEM_TYPE_END;
+						item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+		} else {
+			for (item1 = items1; item1 != inner_item1; item1++) {
+				for (item2 = items2; item2->type != RTE_FLOW_ITEM_TYPE_END;
+						item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+		}
+	} else {
+		if (is_tun_flow_2) {
+			for (item1 = items1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) {
+				for (item2 = items2; item2 != inner_item2; item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+		} else {
+			for (item1 = items1; item1->type != RTE_FLOW_ITEM_TYPE_END; item1++) {
+				for (item2 = items2; item2->type != RTE_FLOW_ITEM_TYPE_END;
+						item2++) {
+					pass = is_item_check_pass(item1, item2, cnt_same);
+					if (!pass)
+						return false;
+				}
+			}
+		}
+	}
+
+	return true;
+}
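
The pairwise walk is quadratic in the pattern lengths, which is acceptable
for the short patterns rte_flow rules use. The cnt_same counter records how
many item types appear in both patterns; nfp_ct_do_flow_merge() below uses
it to size the merged pattern as the sum of the two items_cnt values minus
cnt_same minus one, the final minus one apparently dropping the duplicated
end-of-list item (assuming items_cnt includes RTE_FLOW_ITEM_TYPE_END).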
+
+static inline bool
+is_action_pattern_check_pass(struct rte_flow_item *items,
+		enum rte_flow_item_type type)
+{
+	struct rte_flow_item *item;
+
+	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == type)
+			return false;
+	}
+
+	return true;
+}
+
+static bool
+nfp_ct_merge_action_check(struct rte_flow_action *action,
+		struct rte_flow_item *items)
+{
+	bool pass = true;
+
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+		pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_ETH);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+		pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_IPV4);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:   /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+		pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_IPV6);
+		break;
+	case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:    /* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+		pass = is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_UDP);
+		pass |= is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_TCP);
+		pass |= is_action_pattern_check_pass(items, RTE_FLOW_ITEM_TYPE_SCTP);
+		break;
+	default:
+		break;
+	}
+
+	return pass;
+}
+
+static bool
+nfp_ct_merge_actions_check(struct rte_flow_action *actions,
+		struct rte_flow_item *items,
+		uint8_t *cnt_same)
+{
+	bool pass = true;
+	struct rte_flow_action *action;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:    /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:    /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:   /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:   /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:  /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:   /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:   /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:  /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:     /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+			pass = nfp_ct_merge_action_check(action, items);
+			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK: /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_JUMP:      /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_COUNT:     /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_DROP:      /* FALLTHROUGH */
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			*cnt_same = *cnt_same + 1;
+			break;
+		default:
+			pass = false;
+			break;
+		}
+	}
+
+	return pass;
+}
+
+static void
+nfp_ct_merge_item_real(const struct rte_flow_item *item_src,
+		struct rte_flow_item *item_dst)
+{
+	uint32_t i;
+	size_t size;
+	char *key_dst;
+	char *mask_dst;
+	const char *key_src;
+	const char *mask_src;
+
+	key_src = item_src->spec;
+	mask_src = item_src->mask;
+	key_dst = (char *)(ptrdiff_t)item_dst->spec;
+	mask_dst = (char *)(ptrdiff_t)item_dst->mask;
+	nfp_flow_item_conf_size_get(item_src->type, &size);
+
+	for (i = 0; i < size; i++) {
+		key_dst[i] |= key_src[i];
+		mask_dst[i] |= mask_src[i];
+	}
+}
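
Because the compatibility check already guarantees the two rules never
disagree on a bit they both mask, merging reduces to a byte-wise OR of both
specs and both masks, assuming spec bytes are zeroed outside their mask as
rte_flow items conventionally are. Continuing the example above:

#include <stdint.h>

/* Sketch: OR-merge one spec/mask byte pair. With 0xC0/0xF0 and 0xC5/0xFF
 * this yields 0xC5/0xFF, i.e. the stricter of the two matches.
 */
static inline void
merge_byte(uint8_t key1, uint8_t mask1, uint8_t key2, uint8_t mask2,
		uint8_t *key_out, uint8_t *mask_out)
{
	*key_out = key1 | key2;
	*mask_out = mask1 | mask2;
}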
+
+static bool
+nfp_ct_merge_item(uint32_t index,
+		const struct rte_flow_item *item1,
+		const struct rte_flow_item *item2_start,
+		const struct rte_flow_item *item2_end,
+		struct nfp_ct_merge_entry *merge_entry)
+{
+	struct rte_flow_item *item;
+	const struct rte_flow_item *item2;
+
+	/* Copy to the merged items */
+	item = &merge_entry->rule.items[index];
+	*item = *item1;
+
+	item2 = item2_start;
+	if (item2_end != NULL) {
+		for (; item2 != item2_end; item2++) {
+			if (item1->type == item2->type) {
+				nfp_ct_merge_item_real(item2, item);
+				return true;
+			}
+		}
+	} else {
+		for (; item2->type != RTE_FLOW_ITEM_TYPE_END; item2++) {
+			if (item1->type == item2->type) {
+				nfp_ct_merge_item_real(item2, item);
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static void
+nfp_ct_merge_items(struct nfp_ct_merge_entry *merge_entry)
+{
+	uint32_t index = 0;
+	bool is_tun_flow_1;
+	bool is_tun_flow_2;
+	struct rte_flow_item *items1;
+	struct rte_flow_item *items2;
+	struct rte_flow_item *merge_item;
+	const struct rte_flow_item *item;
+	const struct rte_flow_item *inner1 = NULL;
+	const struct rte_flow_item *inner2 = NULL;
+
+	items1 = merge_entry->pre_ct_parent->rule.items;
+	items2 = merge_entry->post_ct_parent->rule.items;
+	is_tun_flow_1 = nfp_flow_inner_item_get(items1, &inner1);
+	is_tun_flow_2 = nfp_flow_inner_item_get(items2, &inner2);
+
+	if (is_tun_flow_1) {
+		if (is_tun_flow_2) {
+			/* Outer layer */
+			for (item = items1; item != inner1; item++, index++) {
+				if (nfp_ct_merge_item(index, item, items2, inner2, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining outer layer items */
+			for (item = items2; item != inner2; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+
+			/* Inner layer */
+			for (item = inner1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				if (nfp_ct_merge_item(index, item, inner2, NULL, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining inner layer items */
+			for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+		} else {
+			for (item = items1; item != inner1; item++, index++) {
+				if (nfp_ct_merge_item(index, item, items2, NULL, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining items */
+			for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+
+			/* Copy the inner layer items */
+			for (item = inner1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+		}
+	} else {
+		if (is_tun_flow_2) {
+			for (item = items1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				if (nfp_ct_merge_item(index, item, items2, inner2, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining items */
+			for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+		} else {
+			for (item = items1; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				if (nfp_ct_merge_item(index, item, items2, NULL, merge_entry))
+					items2++;
+			}
+
+			/* Copy the remaining items */
+			for (item = items2; item->type != RTE_FLOW_ITEM_TYPE_END; item++, index++) {
+				merge_item = &merge_entry->rule.items[index];
+				*merge_item = *item;
+			}
+		}
+	}
+}
+
+static void
+nfp_ct_merge_actions(struct nfp_ct_merge_entry *merge_entry)
+{
+	struct rte_flow_action *action;
+	struct rte_flow_action *merge_actions;
+
+	merge_actions = merge_entry->rule.actions;
+
+	action = merge_entry->pre_ct_parent->rule.actions;
+	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK ||
+				action->type == RTE_FLOW_ACTION_TYPE_JUMP)
+			continue;
+
+		*merge_actions = *action;
+		merge_actions++;
+	}
+
+	action = merge_entry->post_ct_parent->rule.actions;
+	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		*merge_actions = *action;
+		merge_actions++;
+	}
+}
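
Note the asymmetry: the pre-ct rule's CONNTRACK and JUMP actions are
deliberately dropped, while every action of the post-ct rule is copied
verbatim. The "hand the packet to conntrack, then continue in the next
table" step is exactly what the merge replaces, so those two actions have
no counterpart in the offloaded rule.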
+
+static bool
+nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze,
+		struct nfp_ct_flow_entry *pre_ct_entry,
+		struct nfp_ct_flow_entry *post_ct_entry)
+{
+	bool ret;
+	uint64_t new_cookie[2];
+	uint8_t cnt_same_item = 0;
+	uint8_t cnt_same_action = 0;
+	struct nfp_ct_merge_entry *merge_entry;
+
+	if (pre_ct_entry->repr != post_ct_entry->repr)
+		return true;
+
+	ret = nfp_ct_merge_items_check(pre_ct_entry->rule.items,
+			post_ct_entry->rule.items, &cnt_same_item);
+	if (!ret)
+		return true;
+
+	ret = nfp_ct_merge_actions_check(pre_ct_entry->rule.actions,
+			post_ct_entry->rule.items, &cnt_same_action);
+	if (!ret)
+		return true;
+
+	new_cookie[0] = pre_ct_entry->cookie;
+	new_cookie[1] = post_ct_entry->cookie;
+	merge_entry = nfp_ct_merge_table_search(ze, (char *)&new_cookie, sizeof(uint64_t) * 2);
+	if (merge_entry != NULL)
+		return true;
+
+	merge_entry = rte_zmalloc("ct_merge_entry", sizeof(*merge_entry), 0);
+	if (merge_entry == NULL) {
+		PMD_DRV_LOG(ERR, "Malloc memory for ct merge entry failed");
+		return false;
+	}
+
+	merge_entry->ze = ze;
+	merge_entry->pre_ct_parent = pre_ct_entry;
+	merge_entry->post_ct_parent = post_ct_entry;
+	rte_memcpy(merge_entry->cookie, new_cookie, sizeof(new_cookie));
+	merge_entry->rule.items_cnt = pre_ct_entry->rule.items_cnt +
+			post_ct_entry->rule.items_cnt - cnt_same_item - 1;
+	merge_entry->rule.actions_cnt = pre_ct_entry->rule.actions_cnt +
+			post_ct_entry->rule.actions_cnt - cnt_same_action - 1;
+
+	merge_entry->rule.items = rte_zmalloc("ct_flow_item",
+			sizeof(struct rte_flow_item) * merge_entry->rule.items_cnt, 0);
+	if (merge_entry->rule.items == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc items for merged flow");
+		goto merge_exit;
+	}
+
+	merge_entry->rule.actions = rte_zmalloc("ct_flow_action",
+			sizeof(struct rte_flow_action) * merge_entry->rule.actions_cnt, 0);
+	if (merge_entry->rule.actions == NULL) {
+		PMD_DRV_LOG(ERR, "Could not alloc actions for merged flow");
+		goto free_items;
+	}
+
+	nfp_ct_merge_items(merge_entry);
+	nfp_ct_merge_actions(merge_entry);
+
+	/* Add this entry to the pre_ct and post_ct lists */
+	LIST_INSERT_HEAD(&pre_ct_entry->children, merge_entry, pre_ct_list);
+	LIST_INSERT_HEAD(&post_ct_entry->children, merge_entry, post_ct_list);
+
+	ret = nfp_ct_merge_table_add(ze, merge_entry);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Add into ct merge table failed");
+		goto free_actions;
+	}
+
+	return true;
+
+free_actions:
+	rte_free(merge_entry->rule.actions);
+free_items:
+	rte_free(merge_entry->rule.items);
+merge_exit:
+	LIST_REMOVE(merge_entry, post_ct_list);
+	LIST_REMOVE(merge_entry, pre_ct_list);
+	rte_free(merge_entry);
+
+	return ret;
+}
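
Merged flows are keyed by the 128-bit pair of parent cookies, which is how
the early return above detects that a given pre/post combination has
already been merged. A lookup sketch, assuming the per-zone merge table
follows the same hash-as-key convention as the zone table:

#include <stdint.h>

#include <rte_hash.h>
#include <rte_jhash.h>

static void *
ct_merge_table_lookup_sketch(struct rte_hash *ct_merge_table,
		uint32_t hash_seed, uint64_t pre_cookie, uint64_t post_cookie)
{
	void *entry = NULL;
	uint32_t hash_key;
	uint64_t cookies[2] = { pre_cookie, post_cookie };

	hash_key = rte_jhash(cookies, sizeof(cookies), hash_seed);
	if (rte_hash_lookup_data(ct_merge_table, &hash_key, &entry) < 0)
		return NULL;	/* This pre/post pair was not merged yet. */

	return entry;
}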
+
+static bool
+nfp_ct_merge_flow_entries(struct nfp_ct_flow_entry *fe,
+		struct nfp_ct_zone_entry *ze_src,
+		struct nfp_ct_zone_entry *ze_dst)
+{
+	bool ret;
+	struct nfp_ct_flow_entry *fe_tmp;
+
+	if (fe->type == CT_TYPE_PRE_CT) {
+		LIST_FOREACH(fe_tmp, &ze_src->post_ct_list, post_ct_list) {
+			ret = nfp_ct_do_flow_merge(ze_dst, fe, fe_tmp);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "Merge for ct pre flow failed");
+				return false;
+			}
+		}
+	} else {
+		LIST_FOREACH(fe_tmp, &ze_src->pre_ct_list, pre_ct_list) {
+			ret = nfp_ct_do_flow_merge(ze_dst, fe_tmp, fe);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "Merge for ct post flow failed");
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+static bool
+nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item,
+		struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		uint64_t cookie)
+{
+	bool ret;
+	struct nfp_flow_priv *priv;
+	struct nfp_ct_zone_entry *ze;
+	struct nfp_ct_flow_entry *fe;
+	const struct ct_data *ct = ct_item->spec;
+
+	priv = representor->app_fw_flower->flow_priv;
+	ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, false);
+	if (ze == NULL) {
+		PMD_DRV_LOG(ERR, "Could not get ct zone entry");
+		return false;
+	}
+
+	/* Add entry to pre_ct_list */
+	fe = nfp_ct_flow_entry_get(ze, representor, items, actions, cookie);
+	if (fe == NULL) {
+		PMD_DRV_LOG(ERR, "Could not get ct flow entry");
+		goto ct_zone_entry_free;
+	}
+
+	fe->type = CT_TYPE_PRE_CT;
+	LIST_INSERT_HEAD(&ze->pre_ct_list, fe, pre_ct_list);
+
+	ret = nfp_ct_merge_flow_entries(fe, ze, ze);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Merge ct flow entries failed");
+		goto ct_flow_entry_free;
+	}
+
+	/* Need to check and merge with tables in the wc_zone as well */
+	if (priv->ct_zone_wc != NULL) {
+		ret = nfp_ct_merge_flow_entries(fe, priv->ct_zone_wc, ze);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "Merge ct flow entries wildcast failed");
+			goto ct_flow_entry_free;
+		}
+	}
+
+	/* The real offload logic comes in the next commit, so just return false for now */
+
+ct_flow_entry_free:
+	nfp_ct_flow_entry_destroy(fe);
+
+ct_zone_entry_free:
+	nfp_ct_zone_entry_free(ze, false);
+
+	return false;
+}
+
+static bool
+nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item,
+		struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		uint64_t cookie)
+{
+	bool ret;
+	void *next_data;
+	uint32_t iter = 0;
+	const void *next_key;
+	bool wildcard = false;
+	struct nfp_flow_priv *priv;
+	struct nfp_ct_zone_entry *ze;
+	struct nfp_ct_flow_entry *fe;
+	const struct ct_data *ct = ct_item->spec;
+	const struct ct_data *ct_mask = ct_item->mask;
+
+	if (ct_mask->ct_zone == 0) {
+		wildcard = true;
+	} else if (ct_mask->ct_zone != UINT16_MAX) {
+		PMD_DRV_LOG(ERR, "Partially wildcard ct_zone is not supported");
+		return false;
+	}
+
+	priv = representor->app_fw_flower->flow_priv;
+	ze = nfp_ct_zone_entry_get(priv, ct->ct_zone, wildcard);
+	if (ze == NULL) {
+		PMD_DRV_LOG(ERR, "Could not get ct zone entry");
+		return false;
+	}
+
+	/* Add entry to post_ct_list */
+	fe = nfp_ct_flow_entry_get(ze, representor, items, actions, cookie);
+	if (fe == NULL) {
+		PMD_DRV_LOG(ERR, "Could not get ct flow entry");
+		goto ct_zone_entry_free;
+	}
+
+	fe->type = CT_TYPE_POST_CT;
+	LIST_INSERT_HEAD(&ze->post_ct_list, fe, post_ct_list);
+
+	if (wildcard) {
+		while (rte_hash_iterate(priv->ct_zone_table, &next_key, &next_data, &iter) >= 0) {
+			ze = (struct nfp_ct_zone_entry *)next_data;
+			ret = nfp_ct_merge_flow_entries(fe, ze, ze);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "Merge ct flow entries wildcast failed");
+				break;
+			}
+		}
+	} else {
+		ret = nfp_ct_merge_flow_entries(fe, ze, ze);
+	}
+
+	if (!ret)
+		goto ct_flow_entry_free;
+
+	/* The real offload logic comes in the next commit, so just return false for now */
+
+ct_flow_entry_free:
+	nfp_ct_flow_entry_destroy(fe);
+
+ct_zone_entry_free:
+	nfp_ct_zone_entry_free(ze, wildcard);
+
+	return false;
+}
+
+struct rte_flow *
+nfp_ct_flow_setup(struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		const struct rte_flow_item *ct_item,
+		bool validate_flag,
+		uint64_t cookie)
+{
+	const struct ct_data *ct;
+
+	if (ct_item == NULL)
+		return NULL;
+
+	ct = ct_item->spec;
+
+	if (is_ct_commit_flow(ct)) {
+		return nfp_flow_process(representor, &items[1], actions,
+				validate_flag, cookie, false);
+	}
+
+	if (is_post_ct_flow(ct)) {
+		if (nfp_flow_handle_post_ct(ct_item, representor, &items[1],
+				actions, cookie)) {
+			return nfp_flow_process(representor, &items[1], actions,
+					validate_flag, cookie, false);
+		}
+
+		PMD_DRV_LOG(ERR, "Handle nfp post ct flow failed.");
+		return NULL;
+	}
+
+	if (is_pre_ct_flow(ct, actions)) {
+		if (nfp_flow_handle_pre_ct(ct_item, representor, &items[1],
+				actions, cookie)) {
+			return nfp_flow_process(representor, &items[1], actions,
+					validate_flag, cookie, false);
+		}
+
+		PMD_DRV_LOG(ERR, "Handle nfp pre ct flow failed.");
+		return NULL;
+	}
+
+	PMD_DRV_LOG(ERR, "Unsupported ct flow type.");
+	return NULL;
+}
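
To summarize the dispatch: a commit flow is compiled right away via
nfp_flow_process() with the leading conntrack item skipped, while pre-ct
and post-ct flows are first recorded in their zone entry and merged against
the opposite list before being compiled the same way. In all three cases
the install flag passed to nfp_flow_process() is false, so these handles
act as software-side anchors only; the rules that actually reach the
firmware are the merged flows, offloaded in the next commit of this series.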
diff --git a/drivers/net/nfp/flower/nfp_conntrack.h b/drivers/net/nfp/flower/nfp_conntrack.h
new file mode 100644
index 0000000000..149a3eb040
--- /dev/null
+++ b/drivers/net/nfp/flower/nfp_conntrack.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CONNTRACK_H__
+#define __NFP_CONNTRACK_H__
+
+#include <stdbool.h>
+
+#include <rte_flow.h>
+
+#include "../nfp_flow.h"
+
+struct nfp_ct_map_entry;
+
+struct nfp_ct_zone_entry;
+
+struct nfp_ct_merge_entry;
+
+struct nfp_ct_map_entry *nfp_ct_map_table_search(struct nfp_flow_priv *priv,
+		char *hash_data,
+		uint32_t hash_len);
+
+struct rte_flow *nfp_ct_flow_setup(struct nfp_flower_representor *representor,
+		const struct rte_flow_item items[],
+		const struct rte_flow_action actions[],
+		const struct rte_flow_item *ct_item,
+		bool validate_flag,
+		uint64_t cookie);
+
+#endif /* __NFP_CONNTRACK_H__ */
diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
index 3912566134..7627c3e3f1 100644
--- a/drivers/net/nfp/meson.build
+++ b/drivers/net/nfp/meson.build
@@ -6,6 +6,7 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
     reason = 'only supported on 64-bit Linux'
 endif
 sources = files(
+        'flower/nfp_conntrack.c',
         'flower/nfp_flower.c',
         'flower/nfp_flower_cmsg.c',
         'flower/nfp_flower_ctrl.c',
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 1bb93bcfb5..16a5c7e055 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -10,6 +10,7 @@
 #include <rte_jhash.h>
 #include <rte_malloc.h>
 
+#include "flower/nfp_conntrack.h"
 #include "flower/nfp_flower_representor.h"
 #include "nfpcore/nfp_rtsym.h"
 #include "nfp_logs.h"
@@ -3748,6 +3749,8 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 		bool validate_flag)
 {
 	uint64_t cookie;
+	const struct rte_flow_item *item;
+	const struct rte_flow_item *ct_item = NULL;
 
 	if (attr->group != 0)
 		PMD_DRV_LOG(INFO, "Pretend we support group attribute.");
@@ -3758,8 +3761,19 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 	if (attr->transfer != 0)
 		PMD_DRV_LOG(INFO, "Pretend we support transfer attribute.");
 
+	for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_CONNTRACK) {
+			ct_item = item;
+			break;
+		}
+	}
+
 	cookie = rte_rand();
 
+	if (ct_item != NULL)
+		return nfp_ct_flow_setup(representor, items, actions,
+				ct_item, validate_flag, cookie);
+
 	return nfp_flow_process(representor, items, actions, validate_flag, cookie, true);
 }
 
@@ -4235,6 +4249,23 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
 	};
 
+	struct rte_hash_parameters ct_zone_hash_params = {
+		.name       = "ct_zone_table",
+		.entries    = 65536,
+		.hash_func  = rte_jhash,
+		.socket_id  = rte_socket_id(),
+		.key_len    = sizeof(uint32_t),
+		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+	};
+
+	struct rte_hash_parameters ct_map_hash_params = {
+		.name       = "ct_map_table",
+		.hash_func  = rte_jhash,
+		.socket_id  = rte_socket_id(),
+		.key_len    = sizeof(uint32_t),
+		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+	};
+
 	ctx_count = nfp_rtsym_read_le(pf_dev->sym_tbl,
 			"CONFIG_FC_HOST_CTX_COUNT", &ret);
 	if (ret < 0) {
@@ -4325,6 +4356,25 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 		goto free_flow_table;
 	}
 
+	/* ct zone table */
+	ct_zone_hash_params.hash_func_init_val = priv->hash_seed;
+	priv->ct_zone_table = rte_hash_create(&ct_zone_hash_params);
+	if (priv->ct_zone_table == NULL) {
+		PMD_INIT_LOG(ERR, "ct zone table creation failed");
+		ret = -ENOMEM;
+		goto free_pre_tnl_table;
+	}
+
+	/* ct map table */
+	ct_map_hash_params.hash_func_init_val = priv->hash_seed;
+	ct_map_hash_params.entries = ctx_count;
+	priv->ct_map_table = rte_hash_create(&ct_map_hash_params);
+	if (priv->ct_map_table == NULL) {
+		PMD_INIT_LOG(ERR, "ct map table creation failed");
+		ret = -ENOMEM;
+		goto free_ct_zone_table;
+	}
+
 	/* ipv4 off list */
 	rte_spinlock_init(&priv->ipv4_off_lock);
 	LIST_INIT(&priv->ipv4_off_list);
@@ -4338,6 +4388,10 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)
 
 	return 0;
 
+free_ct_zone_table:
+	rte_hash_free(priv->ct_zone_table);
+free_pre_tnl_table:
+	rte_hash_free(priv->pre_tun_table);
 free_flow_table:
 	rte_hash_free(priv->flow_table);
 free_mask_table:
@@ -4363,6 +4417,8 @@ nfp_flow_priv_uninit(struct nfp_pf_dev *pf_dev)
 	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
 	priv = app_fw_flower->flow_priv;
 
+	rte_hash_free(priv->ct_map_table);
+	rte_hash_free(priv->ct_zone_table);
 	rte_hash_free(priv->pre_tun_table);
 	rte_hash_free(priv->flow_table);
 	rte_hash_free(priv->mask_table);
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index 817eaecba2..df16cab8b5 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -150,6 +150,10 @@ struct nfp_flow_priv {
 	rte_spinlock_t ipv6_off_lock; /**< Lock the ipv6 off list */
 	/* neighbor next */
 	LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */
+	/* Conntrack */
+	struct rte_hash *ct_zone_table; /**< Hash table to store ct zone entry */
+	struct nfp_ct_zone_entry *ct_zone_wc; /**< The wildcard ct zone entry */
+	struct rte_hash *ct_map_table; /**< Hash table to store ct map entry */
 };
 
 struct rte_flow {
-- 
2.39.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 3/4] net/nfp: add call to add and delete the flows to firmware
  2023-10-04  9:35 ` [PATCH v2 " Chaoyong He
  2023-10-04  9:35   ` [PATCH v2 1/4] net/nfp: prepare for the flow merge Chaoyong He
  2023-10-04  9:36   ` [PATCH v2 2/4] net/nfp: add infrastructure for conntrack " Chaoyong He
@ 2023-10-04  9:36   ` Chaoyong He
  2023-10-04  9:36   ` [PATCH v2 4/4] net/nfp: add support for merged flows and conntrack stats Chaoyong He
  2023-10-04 11:55   ` [PATCH v2 0/4] support offload of simple conntrack flow rules Ferruh Yigit
  4 siblings, 0 replies; 12+ messages in thread
From: Chaoyong He @ 2023-10-04  9:36 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

Add the offload calls to add flows to and delete flows from the firmware.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/flower/nfp_conntrack.c | 112 ++++++++++++++++++++++++-
 drivers/net/nfp/flower/nfp_conntrack.h |   5 ++
 drivers/net/nfp/nfp_flow.c             |   8 ++
 3 files changed, 122 insertions(+), 3 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
index 4fa6fdff99..2ea856ebab 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.c
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -9,8 +9,8 @@
 #include <rte_hash.h>
 #include <rte_jhash.h>
 
-#include "../nfp_flow.h"
 #include "../nfp_logs.h"
+#include "nfp_flower_cmsg.h"
 #include "nfp_flower_representor.h"
 
 struct ct_data {
@@ -59,6 +59,7 @@ struct nfp_ct_merge_entry {
 	LIST_ENTRY(nfp_ct_merge_entry) pre_ct_list;
 	LIST_ENTRY(nfp_ct_merge_entry) post_ct_list;
 	struct nfp_initial_flow rule;
+	struct rte_flow *compiled_rule;
 	struct nfp_ct_zone_entry *ze;
 	struct nfp_ct_flow_entry *pre_ct_parent;
 	struct nfp_ct_flow_entry *post_ct_parent;
@@ -974,6 +975,102 @@ nfp_ct_zone_entry_free(struct nfp_ct_zone_entry *ze,
 	}
 }
 
+static int
+nfp_ct_offload_add(struct nfp_flower_representor *repr,
+		struct nfp_ct_merge_entry *merge_entry)
+{
+	int ret;
+	uint64_t cookie;
+	struct rte_flow *nfp_flow;
+	struct nfp_flow_priv *priv;
+	const struct rte_flow_item *items;
+	const struct rte_flow_action *actions;
+
+	cookie = rte_rand();
+	items = merge_entry->rule.items;
+	actions = merge_entry->rule.actions;
+	nfp_flow = nfp_flow_process(repr, items, actions, false, cookie, true);
+	if (nfp_flow == NULL) {
+		PMD_DRV_LOG(ERR, "Process the merged flow rule failed.");
+		return -EINVAL;
+	}
+
+	/* Add the flow to hardware */
+	priv = repr->app_fw_flower->flow_priv;
+	ret = nfp_flower_cmsg_flow_add(repr->app_fw_flower, nfp_flow);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add the merged flow to firmware failed.");
+		goto flow_teardown;
+	}
+
+	/* Add the flow to flow hash table */
+	ret = nfp_flow_table_add(priv, nfp_flow);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Add the merged flow to flow table failed.");
+		goto flow_teardown;
+	}
+
+	merge_entry->compiled_rule = nfp_flow;
+
+	return 0;
+
+flow_teardown:
+	nfp_flow_teardown(priv, nfp_flow, false);
+	nfp_flow_free(nfp_flow);
+
+	return ret;
+}
+
+int
+nfp_ct_offload_del(struct rte_eth_dev *dev,
+		struct nfp_ct_map_entry *me,
+		struct rte_flow_error *error)
+{
+	int ret;
+	struct nfp_ct_flow_entry *fe;
+	struct nfp_ct_merge_entry *m_ent;
+
+	fe = me->fe;
+
+	if (fe->type == CT_TYPE_PRE_CT) {
+		LIST_FOREACH(m_ent, &fe->children, pre_ct_list) {
+			if (m_ent->compiled_rule != NULL) {
+				ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error);
+				if (ret != 0) {
+					PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item");
+					return -EINVAL;
+				}
+				m_ent->compiled_rule = NULL;
+			}
+
+			m_ent->pre_ct_parent = NULL;
+			LIST_REMOVE(m_ent, pre_ct_list);
+			if (m_ent->post_ct_parent == NULL)
+				nfp_ct_merge_entry_destroy(m_ent);
+		}
+	} else {
+		LIST_FOREACH(m_ent, &fe->children, post_ct_list) {
+			if (m_ent->compiled_rule != NULL) {
+				ret = nfp_flow_destroy(dev, m_ent->compiled_rule, error);
+				if (ret != 0) {
+					PMD_DRV_LOG(ERR, "Could not alloc ct_flow_item");
+					return -EINVAL;
+				}
+				m_ent->compiled_rule = NULL;
+			}
+
+			m_ent->post_ct_parent = NULL;
+			LIST_REMOVE(m_ent, post_ct_list);
+			if (m_ent->pre_ct_parent == NULL)
+				nfp_ct_merge_entry_destroy(m_ent);
+		}
+	}
+
+	nfp_ct_flow_entry_destroy_partly(fe);
+
+	return 0;
+}
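
A merged entry stays alive while either parent still references it: each
delete detaches only its own side, and whichever parent detaches last is
the one that actually destroys the entry. The idiom, reduced to a sketch
with illustrative names:

#include <stdlib.h>

struct flow_node;

struct merge_node {
	struct flow_node *pre_parent;
	struct flow_node *post_parent;
};

/* Illustrative only: the node is freed by the last parent to detach. */
static void
detach_pre_parent(struct merge_node *node)
{
	node->pre_parent = NULL;
	if (node->post_parent == NULL)
		free(node);
}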
+
 static inline bool
 is_item_check_pass(const struct rte_flow_item *item1,
 		const struct rte_flow_item *item2,
@@ -1401,8 +1498,17 @@ nfp_ct_do_flow_merge(struct nfp_ct_zone_entry *ze,
 		goto free_actions;
 	}
 
+	/* Send to firmware */
+	ret = nfp_ct_offload_add(pre_ct_entry->repr, merge_entry);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Send the merged flow to firmware failed");
+		goto merge_table_del;
+	}
+
 	return true;
 
+merge_table_del:
+	nfp_ct_merge_table_delete(ze, merge_entry);
 free_actions:
 	rte_free(merge_entry->rule.actions);
 free_items:
@@ -1489,7 +1595,7 @@ nfp_flow_handle_pre_ct(const struct rte_flow_item *ct_item,
 		}
 	}
 
-	/* The real offload logic comes in the next commit, so just return false for now */
+	return true;
 
 ct_flow_entry_free:
 	nfp_ct_flow_entry_destroy(fe);
@@ -1558,7 +1664,7 @@ nfp_flow_handle_post_ct(const struct rte_flow_item *ct_item,
 	if (!ret)
 		goto ct_flow_entry_free;
 
-	/* The real offload logic comes in the next commit, so just return false for now */
+	return true;
 
 ct_flow_entry_free:
 	nfp_ct_flow_entry_destroy(fe);
diff --git a/drivers/net/nfp/flower/nfp_conntrack.h b/drivers/net/nfp/flower/nfp_conntrack.h
index 149a3eb040..2f47280716 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.h
+++ b/drivers/net/nfp/flower/nfp_conntrack.h
@@ -8,6 +8,7 @@
 
 #include <stdbool.h>
 
+#include <ethdev_driver.h>
 #include <rte_flow.h>
 
 #include "../nfp_flow.h"
@@ -22,6 +23,10 @@ struct nfp_ct_map_entry *nfp_ct_map_table_search(struct nfp_flow_priv *priv,
 		char *hash_data,
 		uint32_t hash_len);
 
+int nfp_ct_offload_del(struct rte_eth_dev *dev,
+		struct nfp_ct_map_entry *me,
+		struct rte_flow_error *error);
+
 struct rte_flow *nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 		const struct rte_flow_item items[],
 		const struct rte_flow_action actions[],
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 16a5c7e055..a6439679d3 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -3911,8 +3911,10 @@ nfp_flow_destroy(struct rte_eth_dev *dev,
 		struct rte_flow_error *error)
 {
 	int ret;
+	uint64_t cookie;
 	struct rte_flow *flow_find;
 	struct nfp_flow_priv *priv;
+	struct nfp_ct_map_entry *me;
 	struct nfp_app_fw_flower *app_fw_flower;
 	struct nfp_flower_representor *representor;
 
@@ -3920,6 +3922,12 @@ nfp_flow_destroy(struct rte_eth_dev *dev,
 	app_fw_flower = representor->app_fw_flower;
 	priv = app_fw_flower->flow_priv;
 
+	/* Find the flow in ct_map_table */
+	cookie = rte_be_to_cpu_64(nfp_flow->payload.meta->host_cookie);
+	me = nfp_ct_map_table_search(priv, (char *)&cookie, sizeof(uint64_t));
+	if (me != NULL)
+		return nfp_ct_offload_del(dev, me, error);
+
 	/* Find the flow in flow hash table */
 	flow_find = nfp_flow_table_search(priv, nfp_flow);
 	if (flow_find == NULL) {
-- 
2.39.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 4/4] net/nfp: add support for merged flows and conntrack stats
  2023-10-04  9:35 ` [PATCH v2 " Chaoyong He
                     ` (2 preceding siblings ...)
  2023-10-04  9:36   ` [PATCH v2 3/4] net/nfp: add call to add and delete the flows to firmware Chaoyong He
@ 2023-10-04  9:36   ` Chaoyong He
  2023-10-04 11:55   ` [PATCH v2 0/4] support offload of simple conntrack flow rules Ferruh Yigit
  4 siblings, 0 replies; 12+ messages in thread
From: Chaoyong He @ 2023-10-04  9:36 UTC (permalink / raw)
  To: dev; +Cc: oss-drivers, Chaoyong He

Adjust the original logic to make it valid for both normal flows
and merged flows.
Add the logic to update conntrack flow stats.
Add support for the conntrack action.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
---
 drivers/net/nfp/flower/nfp_conntrack.c | 54 ++++++++++++++++--
 drivers/net/nfp/flower/nfp_conntrack.h |  3 +
 drivers/net/nfp/nfp_flow.c             | 79 ++++++++++++++++++++++----
 drivers/net/nfp/nfp_flow.h             |  7 ++-
 4 files changed, 126 insertions(+), 17 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c
index 2ea856ebab..aacd4d7dd3 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.c
+++ b/drivers/net/nfp/flower/nfp_conntrack.c
@@ -39,6 +39,7 @@ struct nfp_ct_flow_entry {
 	struct nfp_flower_representor *repr;
 	struct nfp_ct_zone_entry *ze;
 	struct nfp_initial_flow rule;
+	struct nfp_fl_stats stats;
 };
 
 struct nfp_ct_map_entry {
@@ -56,6 +57,7 @@ struct nfp_ct_zone_entry {
 
 struct nfp_ct_merge_entry {
 	uint64_t cookie[2];
+	uint32_t ctx_id;
 	LIST_ENTRY(nfp_ct_merge_entry) pre_ct_list;
 	LIST_ENTRY(nfp_ct_merge_entry) post_ct_list;
 	struct nfp_initial_flow rule;
@@ -989,12 +991,14 @@ nfp_ct_offload_add(struct nfp_flower_representor *repr,
 	cookie = rte_rand();
 	items = merge_entry->rule.items;
 	actions = merge_entry->rule.actions;
-	nfp_flow = nfp_flow_process(repr, items, actions, false, cookie, true);
+	nfp_flow = nfp_flow_process(repr, items, actions, false, cookie, true, true);
 	if (nfp_flow == NULL) {
 		PMD_DRV_LOG(ERR, "Process the merged flow rule failed.");
 		return -EINVAL;
 	}
 
+	merge_entry->ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id);
+
 	/* Add the flow to hardware */
 	priv = repr->app_fw_flower->flow_priv;
 	ret = nfp_flower_cmsg_flow_add(repr->app_fw_flower, nfp_flow);
@@ -1004,7 +1008,7 @@ nfp_ct_offload_add(struct nfp_flower_representor *repr,
 	}
 
 	/* Add the flow to flow hash table */
-	ret = nfp_flow_table_add(priv, nfp_flow);
+	ret = nfp_flow_table_add_merge(priv, nfp_flow);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Add the merged flow to flow table failed.");
 		goto flow_teardown;
@@ -1692,14 +1696,14 @@ nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 
 	if (is_ct_commit_flow(ct)) {
 		return nfp_flow_process(representor, &items[1], actions,
-				validate_flag, cookie, false);
+				validate_flag, cookie, false, false);
 	}
 
 	if (is_post_ct_flow(ct)) {
 		if (nfp_flow_handle_post_ct(ct_item, representor, &items[1],
 				actions, cookie)) {
 			return nfp_flow_process(representor, &items[1], actions,
-					validate_flag, cookie, false);
+					validate_flag, cookie, false, false);
 		}
 
 		PMD_DRV_LOG(ERR, "Handle nfp post ct flow failed.");
@@ -1710,7 +1714,7 @@ nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 		if (nfp_flow_handle_pre_ct(ct_item, representor, &items[1],
 				actions, cookie)) {
 			return nfp_flow_process(representor, &items[1], actions,
-					validate_flag, cookie, false);
+					validate_flag, cookie, false, false);
 		}
 
 		PMD_DRV_LOG(ERR, "Handle nfp pre ct flow failed.");
@@ -1720,3 +1724,43 @@ nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 	PMD_DRV_LOG(ERR, "Unsupported ct flow type.");
 	return NULL;
 }
+
+static inline void
+nfp_ct_flow_stats_update(struct nfp_flow_priv *priv,
+		struct nfp_ct_merge_entry *m_ent)
+{
+	uint32_t ctx_id;
+	struct nfp_fl_stats *merge_stats;
+
+	ctx_id = m_ent->ctx_id;
+	merge_stats = &priv->stats[ctx_id];
+
+	m_ent->pre_ct_parent->stats.bytes  += merge_stats->bytes;
+	m_ent->pre_ct_parent->stats.pkts   += merge_stats->pkts;
+	m_ent->post_ct_parent->stats.bytes += merge_stats->bytes;
+	m_ent->post_ct_parent->stats.pkts  += merge_stats->pkts;
+
+	merge_stats->bytes = 0;
+	merge_stats->pkts = 0;
+}
+
+struct nfp_fl_stats *
+nfp_ct_flow_stats_get(struct nfp_flow_priv *priv,
+		struct nfp_ct_map_entry *me)
+{
+	struct nfp_ct_merge_entry *m_ent;
+
+	rte_spinlock_lock(&priv->stats_lock);
+
+	if (me->fe->type == CT_TYPE_PRE_CT) {
+		LIST_FOREACH(m_ent, &me->fe->children, pre_ct_list)
+			nfp_ct_flow_stats_update(priv, m_ent);
+	} else {
+		LIST_FOREACH(m_ent, &me->fe->children, post_ct_list)
+			nfp_ct_flow_stats_update(priv, m_ent);
+	}
+
+	rte_spinlock_unlock(&priv->stats_lock);
+
+	return &me->fe->stats;
+}
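
From the application side nothing changes: the aggregated conntrack stats
come back through the usual count query. A sketch, where port_id and flow
are assumed to come from an earlier rte_flow_create() on this driver:

#include <inttypes.h>
#include <stdio.h>

#include <rte_flow.h>

static int
query_flow_counters(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error error;
	struct rte_flow_query_count query = { .reset = 1 };
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};

	if (rte_flow_query(port_id, flow, &count_action, &query, &error) != 0)
		return -1;

	printf("pkts=%" PRIu64 " bytes=%" PRIu64 "\n", query.hits, query.bytes);

	return 0;
}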
diff --git a/drivers/net/nfp/flower/nfp_conntrack.h b/drivers/net/nfp/flower/nfp_conntrack.h
index 2f47280716..5abab4e984 100644
--- a/drivers/net/nfp/flower/nfp_conntrack.h
+++ b/drivers/net/nfp/flower/nfp_conntrack.h
@@ -34,4 +34,7 @@ struct rte_flow *nfp_ct_flow_setup(struct nfp_flower_representor *representor,
 		bool validate_flag,
 		uint64_t cookie);
 
+struct nfp_fl_stats *nfp_ct_flow_stats_get(struct nfp_flow_priv *priv,
+		struct nfp_ct_map_entry *me);
+
 #endif /* __NFP_CONNTRACK_H__ */
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index a6439679d3..020e31e9de 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -310,14 +310,14 @@ nfp_check_mask_add(struct nfp_flow_priv *priv,
 		ret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id);
 		if (ret != 0)
 			return false;
-
-		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
 	} else {
 		/* mask entry already exist */
 		mask_entry->ref_cnt++;
 		*mask_id = mask_entry->mask_id;
 	}
 
+	*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
+
 	return true;
 }
 
@@ -349,7 +349,7 @@ nfp_check_mask_remove(struct nfp_flow_priv *priv,
 	return true;
 }
 
-int
+static int
 nfp_flow_table_add(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow)
 {
@@ -396,6 +396,48 @@ nfp_flow_table_search(struct nfp_flow_priv *priv,
 	return flow_find;
 }
 
+int
+nfp_flow_table_add_merge(struct nfp_flow_priv *priv,
+		struct rte_flow *nfp_flow)
+{
+	struct rte_flow *flow_find;
+
+	flow_find = nfp_flow_table_search(priv, nfp_flow);
+	if (flow_find != NULL) {
+		if (nfp_flow->merge_flag || flow_find->merge_flag) {
+			flow_find->merge_flag = true;
+			flow_find->ref_cnt++;
+			return 0;
+		}
+
+		PMD_DRV_LOG(ERR, "Add to flow table failed.");
+		return -EINVAL;
+	}
+
+	return nfp_flow_table_add(priv, nfp_flow);
+}
+
+static int
+nfp_flow_table_delete_merge(struct nfp_flow_priv *priv,
+		struct rte_flow *nfp_flow)
+{
+	struct rte_flow *flow_find;
+
+	flow_find = nfp_flow_table_search(priv, nfp_flow);
+	if (flow_find == NULL) {
+		PMD_DRV_LOG(ERR, "Can't delete a non-existing flow.");
+		return -EINVAL;
+	}
+
+	if (nfp_flow->merge_flag || flow_find->merge_flag) {
+		flow_find->ref_cnt--;
+		if (flow_find->ref_cnt > 0)
+			return 0;
+	}
+
+	return nfp_flow_table_delete(priv, nfp_flow);
+}
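
The reference count exists because two different pre/post-ct pairs can
compile down to byte-identical merged flows. Rather than rejecting the
second add as a duplicate, the table keeps a single entry and counts
owners, so the entry (and the hardware rule behind it) disappears only when
the last owner is deleted. Non-merged flows keep the old strict behaviour,
where adding a duplicate is an error.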
+
 static struct rte_flow *
 nfp_flow_alloc(struct nfp_fl_key_ls *key_layer, uint32_t port_id)
 {
@@ -1082,6 +1124,9 @@ nfp_flow_key_layers_calculate_actions(const struct rte_flow_action actions[],
 				return -ENOTSUP;
 			}
 			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+			PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_CONNTRACK detected");
+			break;
 		default:
 			PMD_DRV_LOG(ERR, "Action type %d not supported.", action->type);
 			return -ENOTSUP;
@@ -3626,6 +3671,9 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor,
 				return -EINVAL;
 			position += sizeof(struct nfp_fl_act_meter);
 			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+			PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_CONNTRACK");
+			break;
 		default:
 			PMD_DRV_LOG(ERR, "Unsupported action type: %d", action->type);
 			return -ENOTSUP;
@@ -3647,7 +3695,8 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 		const struct rte_flow_action actions[],
 		bool validate_flag,
 		uint64_t cookie,
-		bool install_flag)
+		bool install_flag,
+		bool merge_flag)
 {
 	int ret;
 	char *hash_data;
@@ -3684,6 +3733,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 	}
 
 	nfp_flow->install_flag = install_flag;
+	nfp_flow->merge_flag = merge_flag;
 
 	nfp_flow_compile_metadata(priv, nfp_flow, &key_layer, stats_ctx, cookie);
 
@@ -3717,7 +3767,7 @@ nfp_flow_process(struct nfp_flower_representor *representor,
 
 	/* Find the flow in hash table */
 	flow_find = nfp_flow_table_search(priv, nfp_flow);
-	if (flow_find != NULL) {
+	if (flow_find != NULL && !nfp_flow->merge_flag && !flow_find->merge_flag) {
 		PMD_DRV_LOG(ERR, "This flow is already exist.");
 		if (!nfp_check_mask_remove(priv, mask_data, mask_len,
 				&nfp_flow_meta->flags)) {
@@ -3774,7 +3824,7 @@ nfp_flow_setup(struct nfp_flower_representor *representor,
 		return nfp_ct_flow_setup(representor, items, actions,
 				ct_item, validate_flag, cookie);
 
-	return nfp_flow_process(representor, items, actions, validate_flag, cookie, true);
+	return nfp_flow_process(representor, items, actions, validate_flag, cookie, true, false);
 }
 
 int
@@ -3877,7 +3927,7 @@ nfp_flow_create(struct rte_eth_dev *dev,
 	}
 
 	/* Add the flow to flow hash table */
-	ret = nfp_flow_table_add(priv, nfp_flow);
+	ret = nfp_flow_table_add_merge(priv, nfp_flow);
 	if (ret != 0) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				NULL, "Add flow to the flow table failed.");
@@ -3988,7 +4038,7 @@ nfp_flow_destroy(struct rte_eth_dev *dev,
 	}
 
 	/* Delete the flow from flow hash table */
-	ret = nfp_flow_table_delete(priv, nfp_flow);
+	ret = nfp_flow_table_delete_merge(priv, nfp_flow);
 	if (ret != 0) {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				NULL, "Delete flow from the flow table failed.");
@@ -4047,10 +4097,12 @@ nfp_flow_stats_get(struct rte_eth_dev *dev,
 		void *data)
 {
 	bool reset;
+	uint64_t cookie;
 	uint32_t ctx_id;
 	struct rte_flow *flow;
 	struct nfp_flow_priv *priv;
 	struct nfp_fl_stats *stats;
+	struct nfp_ct_map_entry *me;
 	struct rte_flow_query_count *query;
 
 	priv = nfp_flow_dev_to_priv(dev);
@@ -4064,8 +4116,15 @@ nfp_flow_stats_get(struct rte_eth_dev *dev,
 	reset = query->reset;
 	memset(query, 0, sizeof(*query));
 
-	ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id);
-	stats = &priv->stats[ctx_id];
+	/* Find the flow in ct_map_table */
+	cookie = rte_be_to_cpu_64(nfp_flow->payload.meta->host_cookie);
+	me = nfp_ct_map_table_search(priv, (char *)&cookie, sizeof(uint64_t));
+	if (me != NULL) {
+		stats = nfp_ct_flow_stats_get(priv, me);
+	} else {
+		ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id);
+		stats = &priv->stats[ctx_id];
+	}
 
 	rte_spinlock_lock(&priv->stats_lock);
 	if (stats->pkts != 0 && stats->bytes != 0) {
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index df16cab8b5..ed06eca371 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -165,7 +165,9 @@ struct rte_flow {
 	uint32_t port_id;
 	bool install_flag;
 	bool tcp_flag;    /**< Used in the SET_TP_* action */
+	bool merge_flag;
 	enum nfp_flow_type type;
+	uint16_t ref_cnt;
 };
 
 /* Forward declaration */
@@ -181,8 +183,9 @@ struct rte_flow *nfp_flow_process(struct nfp_flower_representor *representor,
 		const struct rte_flow_action actions[],
 		bool validate_flag,
 		uint64_t cookie,
-		bool install_flag);
-int nfp_flow_table_add(struct nfp_flow_priv *priv,
+		bool install_flag,
+		bool merge_flag);
+int nfp_flow_table_add_merge(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow);
 int nfp_flow_teardown(struct nfp_flow_priv *priv,
 		struct rte_flow *nfp_flow,
-- 
2.39.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v2 0/4] support offload of simple conntrack flow rules
  2023-10-04  9:35 ` [PATCH v2 " Chaoyong He
                     ` (3 preceding siblings ...)
  2023-10-04  9:36   ` [PATCH v2 4/4] net/nfp: add support for merged flows and conntrack stats Chaoyong He
@ 2023-10-04 11:55   ` Ferruh Yigit
  4 siblings, 0 replies; 12+ messages in thread
From: Ferruh Yigit @ 2023-10-04 11:55 UTC (permalink / raw)
  To: Chaoyong He; +Cc: oss-drivers, dev

On 10/4/2023 10:35 AM, Chaoyong He wrote:
> This patch series add the support of simple conntrack flow rules offload
> through flower firmware by import the needed data structure and logic of
> flow merge.
> 
> ---
> v2:
> * Fix one mis-spell in comment.
> * Revise logic and document to solve the 'devtools/check-doc-vs-code.sh'
>   warning.
> * Adjust the commit message as the advice of reviewer.
> ---
> 
> Chaoyong He (4):
>   net/nfp: prepare for the flow merge
>   net/nfp: add infrastructure for conntrack flow merge
>   net/nfp: add call to add and delete the flows to firmware
>   net/nfp: add support for merged flows and conntrack stats
> 
>  

Series applied to dpdk-next-net/main, thanks.


^ permalink raw reply	[flat|nested] 12+ messages in thread
