From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 8D5DA425A3;
	Fri, 15 Sep 2023 11:02:16 +0200 (CEST)
Received: from mails.dpdk.org (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id B855D40A84;
	Fri, 15 Sep 2023 11:02:01 +0200 (CEST)
Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.120])
 by mails.dpdk.org (Postfix) with ESMTP id 9F0B0406B6
 for <dev@dpdk.org>; Fri, 15 Sep 2023 11:01:59 +0200 (CEST)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple;
 d=intel.com; i=@intel.com; q=dns/txt; s=Intel;
 t=1694768519; x=1726304519;
 h=from:to:cc:subject:date:message-id:in-reply-to:
 references:mime-version:content-transfer-encoding;
 bh=NecUuxSYVrj6UQRaosKZu88T9MmDvu5pCsrRFqDyrTk=;
 b=JhRjnAkCLyxo1iqJi+jFn3B15SlC/2ytq7Km7gxWR44J97VB6TtW28Ag
 Y1X4FU2hzRlW48JrAe4Gv92mFgHsgrVF96Y2D4fD5uRaNizj8kUHY0tgO
 SbqvoCQa3F82XJkJDT0HFMPdRBaQp3MLFWJoLKYYosIxCun3F51EjOMPI
 NLmQysjgPt10BIiGag+84ha1+JGGJ0hWz1HAcoghxR1N+vfICAp8DkEFr
 Fo63eRv1mAltGH9cUwSyXM1qAKovWh27mjIXRz7VN1hKxXfbfae0sn55U
 Sd5M7hSZ9OKLWtaqpVkuVfrcATRDIg5h6SGVGvIvekTF31q4u35w/ZwhF g==;
X-IronPort-AV: E=McAfee;i="6600,9927,10833"; a="378117462"
X-IronPort-AV: E=Sophos;i="6.02,148,1688454000"; d="scan'208";a="378117462"
Received: from orsmga004.jf.intel.com ([10.7.209.38])
 by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 15 Sep 2023 02:01:59 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=McAfee;i="6600,9927,10833"; a="868630853"
X-IronPort-AV: E=Sophos;i="6.02,148,1688454000"; d="scan'208";a="868630853"
Received: from dpdk-pengyuan-mev.sh.intel.com ([10.67.119.128])
 by orsmga004.jf.intel.com with ESMTP; 15 Sep 2023 02:01:56 -0700
From: "Zhang, Yuying" <yuying.zhang@intel.com>
To: yuying.zhang@intel.com, dev@dpdk.org, qi.z.zhang@intel.com,
 beilei.xing@intel.com, jingjing.wu@intel.com
Cc: mingxia.liu@intel.com
Subject: [PATCH v5 3/9] net/cpfl: set up rte flow skeleton
Date: Fri, 15 Sep 2023 10:00:41 +0000
Message-Id: <20230915100047.90153-4-yuying.zhang@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230915100047.90153-1-yuying.zhang@intel.com>
References: <20230906093407.3635038-1-wenjing.qiao@intel.com>
 <20230915100047.90153-1-yuying.zhang@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce a framework that
supports plugging different engines in as the rte_flow backend and
bridges the rte_flow driver API to those flow engines.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
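Note for reviewers (not part of the commit message): the skeleton only
dispatches rte_flow calls; a backend engine plugs in by registering a
struct cpfl_flow_engine before probe time. Below is a minimal,
illustrative sketch of such a registration. The cpfl_dummy_* names are
hypothetical and do not exist in this patch; the real engine added later
in the series supplies its own parse/create/destroy callbacks.

#include <errno.h>

#include <rte_common.h>
#include <rte_flow_driver.h>

#include "cpfl_flow.h"

static int
cpfl_dummy_engine_init(__rte_unused struct cpfl_adapter_ext *ad)
{
	/* Allocate per-adapter engine state here if needed. */
	return 0;
}

static int
cpfl_dummy_parse(__rte_unused struct rte_eth_dev *dev,
		 __rte_unused const struct rte_flow_attr *attr,
		 __rte_unused const struct rte_flow_item pattern[],
		 __rte_unused const struct rte_flow_action actions[],
		 __rte_unused void **meta)
{
	/* Return 0 to claim the rule; a negative value makes
	 * cpfl_flow_engine_match() try the next registered engine.
	 */
	return -ENOTSUP;
}

static struct cpfl_flow_engine cpfl_dummy_engine = {
	.type = CPFL_FLOW_ENGINE_NONE,
	.init = cpfl_dummy_engine_init,
	.parse_pattern_action = cpfl_dummy_parse,
	/* .create/.destroy/.query_count/.free are filled by a real engine */
};

/* Constructor-time registration so the engine is on engine_list before
 * cpfl_flow_engine_init() walks it from cpfl_flow_init().
 */
RTE_INIT(cpfl_dummy_engine_register)
{
	cpfl_flow_engine_register(&cpfl_dummy_engine);
}
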
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 54ae127cc3..44418ce325 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only a vport supports rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only priority 0-7 is supported.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matching engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matching flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized since no flow parser is specified");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 1e0a1b0290..9f1818f8dc 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if js_dep.found()
         message('json-c lib version is too low')
     else
         sources += files(
+		'cpfl_flow.c',
                 'cpfl_flow_parser.c',
         )
         dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
-- 
2.34.1